author     Michaël Zasso <targos@protonmail.com>   2021-02-11 19:03:35 +0100
committer  Michaël Zasso <targos@protonmail.com>   2021-02-11 19:09:18 +0100
commit     c7b329225126ad3b9eeb2408e0f0801f1aea5eb1 (patch)
tree       193c193111d5f302031ad345bc94d17a3f67bf66 /deps/v8/src
parent     6ea9af9906cd74ed07ca05cf6aa44382025a6044 (diff)
download   node-new-c7b329225126ad3b9eeb2408e0f0801f1aea5eb1.tar.gz
deps: update V8 to 8.8.278.17
PR-URL: https://github.com/nodejs/node/pull/36139
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Shelley Vohr <codebytere@gmail.com>
Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/DIR_METADATA11
-rw-r--r--deps/v8/src/OWNERS2
-rw-r--r--deps/v8/src/api/DIR_METADATA11
-rw-r--r--deps/v8/src/api/OWNERS2
-rw-r--r--deps/v8/src/api/api-natives.cc3
-rw-r--r--deps/v8/src/api/api.cc226
-rw-r--r--deps/v8/src/asmjs/DIR_METADATA11
-rw-r--r--deps/v8/src/asmjs/OWNERS2
-rw-r--r--deps/v8/src/ast/DIR_METADATA11
-rw-r--r--deps/v8/src/ast/OWNERS2
-rw-r--r--deps/v8/src/ast/ast-function-literal-id-reindexer.cc9
-rw-r--r--deps/v8/src/ast/ast-source-ranges.h19
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc67
-rw-r--r--deps/v8/src/ast/ast-value-factory.h37
-rw-r--r--deps/v8/src/ast/ast.cc8
-rw-r--r--deps/v8/src/ast/ast.h12
-rw-r--r--deps/v8/src/ast/modules.cc93
-rw-r--r--deps/v8/src/ast/modules.h63
-rw-r--r--deps/v8/src/ast/prettyprinter.cc2
-rw-r--r--deps/v8/src/ast/scopes.h2
-rw-r--r--deps/v8/src/base/DIR_METADATA11
-rw-r--r--deps/v8/src/base/OWNERS2
-rw-r--r--deps/v8/src/base/bounded-page-allocator.h4
-rw-r--r--deps/v8/src/base/build_config.h4
-rw-r--r--deps/v8/src/base/debug/stack_trace_posix.cc11
-rw-r--r--deps/v8/src/base/hashmap-entry.h52
-rw-r--r--deps/v8/src/base/hashmap.h46
-rw-r--r--deps/v8/src/base/lazy-instance.h5
-rw-r--r--deps/v8/src/base/macros.h4
-rw-r--r--deps/v8/src/base/platform/DIR_METADATA11
-rw-r--r--deps/v8/src/base/platform/OWNERS2
-rw-r--r--deps/v8/src/base/platform/condition-variable.h4
-rw-r--r--deps/v8/src/base/platform/mutex.h20
-rw-r--r--deps/v8/src/base/platform/platform-aix.cc2
-rw-r--r--deps/v8/src/base/platform/platform-freebsd.cc2
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc12
-rw-r--r--deps/v8/src/base/platform/platform-macos.cc2
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc16
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc4
-rw-r--r--deps/v8/src/base/platform/platform.h50
-rw-r--r--deps/v8/src/base/platform/semaphore.h4
-rw-r--r--deps/v8/src/base/platform/wrappers.h31
-rw-r--r--deps/v8/src/base/platform/wrappers_starboard.cc31
-rw-r--r--deps/v8/src/base/platform/wrappers_std.cc34
-rw-r--r--deps/v8/src/base/region-allocator.h4
-rw-r--r--deps/v8/src/base/ring-buffer.h5
-rw-r--r--deps/v8/src/base/safe_conversions.h372
-rw-r--r--deps/v8/src/base/safe_conversions_arm_impl.h60
-rw-r--r--deps/v8/src/base/safe_conversions_impl.h822
-rw-r--r--deps/v8/src/base/threaded-list.h4
-rw-r--r--deps/v8/src/builtins/DIR_METADATA11
-rw-r--r--deps/v8/src/builtins/OWNERS2
-rw-r--r--deps/v8/src/builtins/accessors.cc11
-rw-r--r--deps/v8/src/builtins/accessors.h4
-rw-r--r--deps/v8/src/builtins/aggregate-error.tq4
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc1032
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc1364
-rw-r--r--deps/v8/src/builtins/base.tq28
-rw-r--r--deps/v8/src/builtins/builtins-api.cc16
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc195
-rw-r--r--deps/v8/src/builtins/builtins-array.cc4
-rw-r--r--deps/v8/src/builtins/builtins-arraybuffer.cc10
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc44
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc6
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc86
-rw-r--r--deps/v8/src/builtins/builtins-async-iterator-gen.cc12
-rw-r--r--deps/v8/src/builtins/builtins-bigint-gen.cc15
-rw-r--r--deps/v8/src/builtins/builtins-bigint.tq (renamed from deps/v8/src/builtins/bigint.tq)18
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc111
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc16
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc192
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc82
-rw-r--r--deps/v8/src/builtins/builtins-conversion-gen.cc14
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc2
-rw-r--r--deps/v8/src/builtins/builtins-date-gen.cc82
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h11
-rw-r--r--deps/v8/src/builtins/builtins-function.cc10
-rw-r--r--deps/v8/src/builtins/builtins-generator-gen.cc20
-rw-r--r--deps/v8/src/builtins/builtins-global-gen.cc8
-rw-r--r--deps/v8/src/builtins/builtins-handler-gen.cc104
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc177
-rw-r--r--deps/v8/src/builtins/builtins-intl-gen.cc14
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc46
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.cc109
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-microtask-queue-gen.cc13
-rw-r--r--deps/v8/src/builtins/builtins-number-gen.cc75
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc104
-rw-r--r--deps/v8/src/builtins/builtins-object.cc4
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc16
-rw-r--r--deps/v8/src/builtins/builtins-reflect.cc6
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc102
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc164
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc282
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.h28
-rw-r--r--deps/v8/src/builtins/builtins-string.cc2
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.cc49
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.h1
-rw-r--r--deps/v8/src/builtins/builtins-utils-gen.h51
-rw-r--r--deps/v8/src/builtins/builtins-utils-inl.h8
-rw-r--r--deps/v8/src/builtins/builtins-utils.h5
-rw-r--r--deps/v8/src/builtins/builtins-wasm-gen.cc61
-rw-r--r--deps/v8/src/builtins/builtins.cc5
-rw-r--r--deps/v8/src/builtins/cast.tq17
-rw-r--r--deps/v8/src/builtins/constants-table-builder.cc15
-rw-r--r--deps/v8/src/builtins/convert.tq5
-rw-r--r--deps/v8/src/builtins/generate-bytecodes-builtins-list.cc34
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc1049
-rw-r--r--deps/v8/src/builtins/ic-dynamic-map-checks.tq155
-rw-r--r--deps/v8/src/builtins/ic.tq4
-rw-r--r--deps/v8/src/builtins/internal.tq43
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc777
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc769
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc755
-rw-r--r--deps/v8/src/builtins/regexp.tq10
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc760
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc3
-rw-r--r--deps/v8/src/builtins/string-trim.tq168
-rw-r--r--deps/v8/src/builtins/torque-internal.tq5
-rw-r--r--deps/v8/src/builtins/typed-array-createtypedarray.tq1
-rw-r--r--deps/v8/src/builtins/typed-array-sort.tq8
-rw-r--r--deps/v8/src/builtins/typed-array.tq4
-rw-r--r--deps/v8/src/builtins/wasm.tq35
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc1117
-rw-r--r--deps/v8/src/codegen/DIR_METADATA11
-rw-r--r--deps/v8/src/codegen/OWNERS2
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.cc53
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.h4
-rw-r--r--deps/v8/src/codegen/arm/interface-descriptors-arm.cc48
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.cc180
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.h27
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.cc15
-rw-r--r--deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc48
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.cc193
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.h20
-rw-r--r--deps/v8/src/codegen/arm64/register-arm64.h2
-rw-r--r--deps/v8/src/codegen/assembler.cc13
-rw-r--r--deps/v8/src/codegen/assembler.h11
-rw-r--r--deps/v8/src/codegen/bailout-reason.h2
-rw-r--r--deps/v8/src/codegen/code-desc.h34
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.cc922
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.h532
-rw-r--r--deps/v8/src/codegen/compilation-cache.cc12
-rw-r--r--deps/v8/src/codegen/compilation-cache.h2
-rw-r--r--deps/v8/src/codegen/compiler.cc222
-rw-r--r--deps/v8/src/codegen/compiler.h30
-rw-r--r--deps/v8/src/codegen/external-reference.cc17
-rw-r--r--deps/v8/src/codegen/external-reference.h25
-rw-r--r--deps/v8/src/codegen/handler-table.cc2
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.cc16
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.h1
-rw-r--r--deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc48
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.cc169
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.h26
-rw-r--r--deps/v8/src/codegen/interface-descriptors.cc40
-rw-r--r--deps/v8/src/codegen/interface-descriptors.h33
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.cc10
-rw-r--r--deps/v8/src/codegen/mips/interface-descriptors-mips.cc48
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.cc139
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.h28
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.cc14
-rw-r--r--deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc48
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.cc145
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.h28
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.cc7
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.h9
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.cc20
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.h2
-rw-r--r--deps/v8/src/codegen/ppc/constants-ppc.h48
-rw-r--r--deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc48
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.cc45
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.h15
-rw-r--r--deps/v8/src/codegen/ppc/register-ppc.h6
-rw-r--r--deps/v8/src/codegen/register-configuration.cc4
-rw-r--r--deps/v8/src/codegen/register-configuration.h5
-rw-r--r--deps/v8/src/codegen/reloc-info.cc2
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.cc9
-rw-r--r--deps/v8/src/codegen/s390/interface-descriptors-s390.cc48
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.cc43
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.h15
-rw-r--r--deps/v8/src/codegen/s390/register-s390.h6
-rw-r--r--deps/v8/src/codegen/safepoint-table.cc2
-rw-r--r--deps/v8/src/codegen/tnode.h19
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.cc117
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.h101
-rw-r--r--deps/v8/src/codegen/x64/interface-descriptors-x64.cc75
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.cc360
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.h62
-rw-r--r--deps/v8/src/common/DIR_METADATA11
-rw-r--r--deps/v8/src/common/OWNERS2
-rw-r--r--deps/v8/src/common/assert-scope.cc2
-rw-r--r--deps/v8/src/common/assert-scope.h24
-rw-r--r--deps/v8/src/common/external-pointer-inl.h93
-rw-r--r--deps/v8/src/common/external-pointer.h43
-rw-r--r--deps/v8/src/common/globals.h194
-rw-r--r--deps/v8/src/common/message-template.h13
-rw-r--r--deps/v8/src/common/ptr-compr-inl.h43
-rw-r--r--deps/v8/src/common/ptr-compr.h4
-rw-r--r--deps/v8/src/compiler-dispatcher/DIR_METADATA11
-rw-r--r--deps/v8/src/compiler-dispatcher/OWNERS2
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc2
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc15
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h7
-rw-r--r--deps/v8/src/compiler/DIR_METADATA11
-rw-r--r--deps/v8/src/compiler/OWNERS3
-rw-r--r--deps/v8/src/compiler/access-builder.cc84
-rw-r--r--deps/v8/src/compiler/access-builder.h3
-rw-r--r--deps/v8/src/compiler/access-info.cc67
-rw-r--r--deps/v8/src/compiler/access-info.h26
-rw-r--r--deps/v8/src/compiler/add-type-assertions-reducer.h5
-rw-r--r--deps/v8/src/compiler/allocation-builder-inl.h3
-rw-r--r--deps/v8/src/compiler/allocation-builder.h2
-rw-r--r--deps/v8/src/compiler/backend/DIR_METADATA11
-rw-r--r--deps/v8/src/compiler/backend/OWNERS2
-rw-r--r--deps/v8/src/compiler/backend/arm/code-generator-arm.cc131
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-codes-arm.h38
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc38
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc132
-rw-r--r--deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc361
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h68
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc64
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc410
-rw-r--r--deps/v8/src/compiler/backend/code-generator-impl.h11
-rw-r--r--deps/v8/src/compiler/backend/code-generator.cc20
-rw-r--r--deps/v8/src/compiler/backend/code-generator.h10
-rw-r--r--deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc210
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h52
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc52
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc136
-rw-r--r--deps/v8/src/compiler/backend/instruction-codes.h200
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.cc244
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.h6
-rw-r--r--deps/v8/src/compiler/backend/instruction.h36
-rw-r--r--deps/v8/src/compiler/backend/mid-tier-register-allocator.cc843
-rw-r--r--deps/v8/src/compiler/backend/mid-tier-register-allocator.h9
-rw-r--r--deps/v8/src/compiler/backend/mips/code-generator-mips.cc124
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-codes-mips.h37
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc37
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc132
-rw-r--r--deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc181
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h39
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc39
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc190
-rw-r--r--deps/v8/src/compiler/backend/move-optimizer.h5
-rw-r--r--deps/v8/src/compiler/backend/ppc/OWNERS1
-rw-r--r--deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc304
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h20
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc20
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc109
-rw-r--r--deps/v8/src/compiler/backend/register-allocator-verifier.h25
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.cc41
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.h75
-rw-r--r--deps/v8/src/compiler/backend/s390/code-generator-s390.cc181
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-codes-s390.h27
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc27
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc74
-rw-r--r--deps/v8/src/compiler/backend/spill-placer.h5
-rw-r--r--deps/v8/src/compiler/backend/x64/code-generator-x64.cc793
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-codes-x64.h69
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc69
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc641
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.h4
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc230
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h2
-rw-r--r--deps/v8/src/compiler/bytecode-liveness-map.h4
-rw-r--r--deps/v8/src/compiler/c-linkage.cc3
-rw-r--r--deps/v8/src/compiler/code-assembler.cc65
-rw-r--r--deps/v8/src/compiler/code-assembler.h137
-rw-r--r--deps/v8/src/compiler/common-node-cache.h5
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc19
-rw-r--r--deps/v8/src/compiler/common-operator.cc2
-rw-r--r--deps/v8/src/compiler/common-operator.h4
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.cc12
-rw-r--r--deps/v8/src/compiler/compiler-source-position-table.h7
-rw-r--r--deps/v8/src/compiler/constant-folding-reducer.h4
-rw-r--r--deps/v8/src/compiler/control-flow-optimizer.cc8
-rw-r--r--deps/v8/src/compiler/control-flow-optimizer.h4
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.cc6
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.h4
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.h4
-rw-r--r--deps/v8/src/compiler/decompression-optimizer.h4
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc384
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h4
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc33
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.h4
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc15
-rw-r--r--deps/v8/src/compiler/feedback-source.cc3
-rw-r--r--deps/v8/src/compiler/feedback-source.h1
-rw-r--r--deps/v8/src/compiler/frame.h4
-rw-r--r--deps/v8/src/compiler/globals.h9
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc16
-rw-r--r--deps/v8/src/compiler/graph-assembler.h6
-rw-r--r--deps/v8/src/compiler/graph-reducer.h5
-rw-r--r--deps/v8/src/compiler/graph-trimmer.h4
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc12
-rw-r--r--deps/v8/src/compiler/graph.h8
-rw-r--r--deps/v8/src/compiler/heap-refs.h18
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc4
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc110
-rw-r--r--deps/v8/src/compiler/js-context-specialization.h4
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc3
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc125
-rw-r--r--deps/v8/src/compiler/js-graph.h5
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc335
-rw-r--r--deps/v8/src/compiler/js-heap-broker.h70
-rw-r--r--deps/v8/src/compiler/js-heap-copy-reducer.cc6
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc4
-rw-r--r--deps/v8/src/compiler/js-inlining.cc21
-rw-r--r--deps/v8/src/compiler/js-inlining.h2
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc2
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc490
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h58
-rw-r--r--deps/v8/src/compiler/js-operator.cc67
-rw-r--r--deps/v8/src/compiler/js-operator.h80
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc10
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.h9
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc120
-rw-r--r--deps/v8/src/compiler/linkage.cc4
-rw-r--r--deps/v8/src/compiler/linkage.h13
-rw-r--r--deps/v8/src/compiler/load-elimination.h4
-rw-r--r--deps/v8/src/compiler/machine-graph.h4
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc494
-rw-r--r--deps/v8/src/compiler/machine-operator.cc290
-rw-r--r--deps/v8/src/compiler/machine-operator.h124
-rw-r--r--deps/v8/src/compiler/map-inference.cc6
-rw-r--r--deps/v8/src/compiler/memory-lowering.cc43
-rw-r--r--deps/v8/src/compiler/memory-lowering.h7
-rw-r--r--deps/v8/src/compiler/node-cache.h4
-rw-r--r--deps/v8/src/compiler/node-marker.h4
-rw-r--r--deps/v8/src/compiler/node-matchers.h206
-rw-r--r--deps/v8/src/compiler/node-origin-table.h12
-rw-r--r--deps/v8/src/compiler/node-properties.cc28
-rw-r--r--deps/v8/src/compiler/node-properties.h31
-rw-r--r--deps/v8/src/compiler/node.cc4
-rw-r--r--deps/v8/src/compiler/node.h4
-rw-r--r--deps/v8/src/compiler/opcodes.h61
-rw-r--r--deps/v8/src/compiler/operator-properties.cc11
-rw-r--r--deps/v8/src/compiler/operator-properties.h6
-rw-r--r--deps/v8/src/compiler/operator.h4
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.h13
-rw-r--r--deps/v8/src/compiler/pipeline.cc156
-rw-r--r--deps/v8/src/compiler/processed-feedback.h8
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc74
-rw-r--r--deps/v8/src/compiler/property-access-builder.h20
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc15
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h17
-rw-r--r--deps/v8/src/compiler/redundancy-elimination.h4
-rw-r--r--deps/v8/src/compiler/representation-change.cc6
-rw-r--r--deps/v8/src/compiler/schedule.h8
-rw-r--r--deps/v8/src/compiler/serializer-for-background-compilation.cc161
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc653
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.h2
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc60
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h4
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc52
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.h5
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc13
-rw-r--r--deps/v8/src/compiler/simplified-operator.h44
-rw-r--r--deps/v8/src/compiler/type-narrowing-reducer.h4
-rw-r--r--deps/v8/src/compiler/typed-optimization.cc2
-rw-r--r--deps/v8/src/compiler/typed-optimization.h4
-rw-r--r--deps/v8/src/compiler/typer.cc6
-rw-r--r--deps/v8/src/compiler/typer.h4
-rw-r--r--deps/v8/src/compiler/types.cc47
-rw-r--r--deps/v8/src/compiler/verifier.cc2
-rw-r--r--deps/v8/src/compiler/verifier.h4
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc1187
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h50
-rw-r--r--deps/v8/src/compiler/zone-stats.h12
-rw-r--r--deps/v8/src/d8/d8.cc48
-rw-r--r--deps/v8/src/d8/d8.h3
-rw-r--r--deps/v8/src/date/DIR_METADATA11
-rw-r--r--deps/v8/src/date/OWNERS2
-rw-r--r--deps/v8/src/debug/DIR_METADATA11
-rw-r--r--deps/v8/src/debug/OWNERS2
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc66
-rw-r--r--deps/v8/src/debug/debug-evaluate.h5
-rw-r--r--deps/v8/src/debug/debug-frames.cc10
-rw-r--r--deps/v8/src/debug/debug-frames.h8
-rw-r--r--deps/v8/src/debug/debug-interface.h10
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.cc24
-rw-r--r--deps/v8/src/debug/debug.cc12
-rw-r--r--deps/v8/src/debug/liveedit.cc2
-rw-r--r--deps/v8/src/debug/ppc/OWNERS1
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/DIR_METADATA11
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/OWNERS2
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc4
-rw-r--r--deps/v8/src/deoptimizer/DIR_METADATA11
-rw-r--r--deps/v8/src/deoptimizer/OWNERS2
-rw-r--r--deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc240
-rw-r--r--deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc287
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.cc253
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.h83
-rw-r--r--deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc195
-rw-r--r--deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc209
-rw-r--r--deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc209
-rw-r--r--deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc236
-rw-r--r--deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc234
-rw-r--r--deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc211
-rw-r--r--deps/v8/src/diagnostics/arm/disasm-arm.cc1343
-rw-r--r--deps/v8/src/diagnostics/arm/unwinder-arm.cc37
-rw-r--r--deps/v8/src/diagnostics/arm64/unwinder-arm64.cc12
-rw-r--r--deps/v8/src/diagnostics/basic-block-profiler.cc2
-rw-r--r--deps/v8/src/diagnostics/basic-block-profiler.h4
-rw-r--r--deps/v8/src/diagnostics/disassembler.cc5
-rw-r--r--deps/v8/src/diagnostics/ia32/disasm-ia32.cc38
-rw-r--r--deps/v8/src/diagnostics/ia32/unwinder-ia32.cc12
-rw-r--r--deps/v8/src/diagnostics/mips/unwinder-mips.cc12
-rw-r--r--deps/v8/src/diagnostics/mips64/unwinder-mips64.cc12
-rw-r--r--deps/v8/src/diagnostics/objects-debug.cc79
-rw-r--r--deps/v8/src/diagnostics/objects-printer.cc138
-rw-r--r--deps/v8/src/diagnostics/perf-jit.cc11
-rw-r--r--deps/v8/src/diagnostics/ppc/unwinder-ppc.cc8
-rw-r--r--deps/v8/src/diagnostics/s390/unwinder-s390.cc8
-rw-r--r--deps/v8/src/diagnostics/unwinder.cc28
-rw-r--r--deps/v8/src/diagnostics/unwinder.h17
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.cc31
-rw-r--r--deps/v8/src/diagnostics/x64/disasm-x64.cc906
-rw-r--r--deps/v8/src/diagnostics/x64/unwinder-x64.cc12
-rw-r--r--deps/v8/src/execution/DIR_METADATA11
-rw-r--r--deps/v8/src/execution/OWNERS2
-rw-r--r--deps/v8/src/execution/arguments.h6
-rw-r--r--deps/v8/src/execution/arm/frame-constants-arm.h8
-rw-r--r--deps/v8/src/execution/arm/simulator-arm.cc2571
-rw-r--r--deps/v8/src/execution/arm/simulator-arm.h7
-rw-r--r--deps/v8/src/execution/arm64/frame-constants-arm64.h2
-rw-r--r--deps/v8/src/execution/arm64/pointer-auth-arm64.cc3
-rw-r--r--deps/v8/src/execution/execution.cc16
-rw-r--r--deps/v8/src/execution/external-pointer-table.cc22
-rw-r--r--deps/v8/src/execution/external-pointer-table.h80
-rw-r--r--deps/v8/src/execution/frame-constants.h36
-rw-r--r--deps/v8/src/execution/frames-inl.h98
-rw-r--r--deps/v8/src/execution/frames.cc164
-rw-r--r--deps/v8/src/execution/frames.h492
-rw-r--r--deps/v8/src/execution/isolate-data.h22
-rw-r--r--deps/v8/src/execution/isolate-inl.h4
-rw-r--r--deps/v8/src/execution/isolate-utils-inl.h31
-rw-r--r--deps/v8/src/execution/isolate-utils.h2
-rw-r--r--deps/v8/src/execution/isolate.cc281
-rw-r--r--deps/v8/src/execution/isolate.h138
-rw-r--r--deps/v8/src/execution/local-isolate-inl.h4
-rw-r--r--deps/v8/src/execution/local-isolate.cc16
-rw-r--r--deps/v8/src/execution/local-isolate.h22
-rw-r--r--deps/v8/src/execution/messages.cc89
-rw-r--r--deps/v8/src/execution/messages.h27
-rw-r--r--deps/v8/src/execution/ppc/frame-constants-ppc.h4
-rw-r--r--deps/v8/src/execution/runtime-profiler.cc101
-rw-r--r--deps/v8/src/execution/runtime-profiler.h11
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.cc25
-rw-r--r--deps/v8/src/extensions/gc-extension.cc2
-rw-r--r--deps/v8/src/flags/flag-definitions.h100
-rw-r--r--deps/v8/src/handles/DIR_METADATA11
-rw-r--r--deps/v8/src/handles/OWNERS2
-rw-r--r--deps/v8/src/handles/global-handles.cc89
-rw-r--r--deps/v8/src/handles/global-handles.h52
-rw-r--r--deps/v8/src/handles/handles-inl.h10
-rw-r--r--deps/v8/src/handles/handles.cc9
-rw-r--r--deps/v8/src/handles/handles.h9
-rw-r--r--deps/v8/src/handles/maybe-handles-inl.h34
-rw-r--r--deps/v8/src/handles/maybe-handles.h13
-rw-r--r--deps/v8/src/heap/DIR_METADATA11
-rw-r--r--deps/v8/src/heap/OWNERS2
-rw-r--r--deps/v8/src/heap/array-buffer-sweeper.cc137
-rw-r--r--deps/v8/src/heap/array-buffer-sweeper.h45
-rw-r--r--deps/v8/src/heap/base/stack.cc16
-rw-r--r--deps/v8/src/heap/base/worklist.h14
-rw-r--r--deps/v8/src/heap/basic-memory-chunk.h2
-rw-r--r--deps/v8/src/heap/code-object-registry.cc70
-rw-r--r--deps/v8/src/heap/code-object-registry.h6
-rw-r--r--deps/v8/src/heap/code-stats.cc7
-rw-r--r--deps/v8/src/heap/collection-barrier.cc100
-rw-r--r--deps/v8/src/heap/collection-barrier.h93
-rw-r--r--deps/v8/src/heap/concurrent-allocator.cc4
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc209
-rw-r--r--deps/v8/src/heap/concurrent-marking.h58
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.cc91
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.h8
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-snapshot.cc713
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-snapshot.h29
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h13
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc70
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h30
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc92
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h66
-rw-r--r--deps/v8/src/heap/cppgc/compaction-worklists.cc14
-rw-r--r--deps/v8/src/heap/cppgc/compaction-worklists.h35
-rw-r--r--deps/v8/src/heap/cppgc/compactor.cc505
-rw-r--r--deps/v8/src/heap/cppgc/compactor.h56
-rw-r--r--deps/v8/src/heap/cppgc/concurrent-marker.cc246
-rw-r--r--deps/v8/src/heap/cppgc/concurrent-marker.h76
-rw-r--r--deps/v8/src/heap/cppgc/default-job.h186
-rw-r--r--deps/v8/src/heap/cppgc/default-platform.cc143
-rw-r--r--deps/v8/src/heap/cppgc/garbage-collector.h2
-rw-r--r--deps/v8/src/heap/cppgc/gc-info-table.h3
-rw-r--r--deps/v8/src/heap/cppgc/gc-info.cc5
-rw-r--r--deps/v8/src/heap/cppgc/globals.h3
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.cc11
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.h23
-rw-r--r--deps/v8/src/heap/cppgc/heap-object-header.cc10
-rw-r--r--deps/v8/src/heap/cppgc/heap-object-header.h42
-rw-r--r--deps/v8/src/heap/cppgc/heap-page.cc2
-rw-r--r--deps/v8/src/heap/cppgc/heap-page.h43
-rw-r--r--deps/v8/src/heap/cppgc/heap-space.cc14
-rw-r--r--deps/v8/src/heap/cppgc/heap-space.h8
-rw-r--r--deps/v8/src/heap/cppgc/heap.cc18
-rw-r--r--deps/v8/src/heap/cppgc/incremental-marking-schedule.cc25
-rw-r--r--deps/v8/src/heap/cppgc/incremental-marking-schedule.h10
-rw-r--r--deps/v8/src/heap/cppgc/marker.cc201
-rw-r--r--deps/v8/src/heap/cppgc/marker.h23
-rw-r--r--deps/v8/src/heap/cppgc/marking-state.cc22
-rw-r--r--deps/v8/src/heap/cppgc/marking-state.h359
-rw-r--r--deps/v8/src/heap/cppgc/marking-verifier.cc96
-rw-r--r--deps/v8/src/heap/cppgc/marking-verifier.h49
-rw-r--r--deps/v8/src/heap/cppgc/marking-visitor.cc83
-rw-r--r--deps/v8/src/heap/cppgc/marking-visitor.h59
-rw-r--r--deps/v8/src/heap/cppgc/marking-worklists.cc7
-rw-r--r--deps/v8/src/heap/cppgc/marking-worklists.h139
-rw-r--r--deps/v8/src/heap/cppgc/name-trait.cc41
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator.h12
-rw-r--r--deps/v8/src/heap/cppgc/object-start-bitmap.h58
-rw-r--r--deps/v8/src/heap/cppgc/persistent-node.cc9
-rw-r--r--deps/v8/src/heap/cppgc/pointer-policies.cc12
-rw-r--r--deps/v8/src/heap/cppgc/process-heap.cc4
-rw-r--r--deps/v8/src/heap/cppgc/process-heap.h18
-rw-r--r--deps/v8/src/heap/cppgc/raw-heap.cc13
-rw-r--r--deps/v8/src/heap/cppgc/raw-heap.h3
-rw-r--r--deps/v8/src/heap/cppgc/sanitizers.h31
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.cc59
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.h16
-rw-r--r--deps/v8/src/heap/cppgc/trace-trait.cc10
-rw-r--r--deps/v8/src/heap/cppgc/visitor.cc17
-rw-r--r--deps/v8/src/heap/cppgc/visitor.h16
-rw-r--r--deps/v8/src/heap/cppgc/write-barrier.cc9
-rw-r--r--deps/v8/src/heap/embedder-tracing.cc30
-rw-r--r--deps/v8/src/heap/embedder-tracing.h19
-rw-r--r--deps/v8/src/heap/factory-base.cc54
-rw-r--r--deps/v8/src/heap/factory-base.h2
-rw-r--r--deps/v8/src/heap/factory.cc525
-rw-r--r--deps/v8/src/heap/factory.h70
-rw-r--r--deps/v8/src/heap/free-list.cc44
-rw-r--r--deps/v8/src/heap/free-list.h25
-rw-r--r--deps/v8/src/heap/gc-tracer.cc31
-rw-r--r--deps/v8/src/heap/heap-inl.h44
-rw-r--r--deps/v8/src/heap/heap-write-barrier-inl.h1
-rw-r--r--deps/v8/src/heap/heap.cc425
-rw-r--r--deps/v8/src/heap/heap.h106
-rw-r--r--deps/v8/src/heap/incremental-marking.cc108
-rw-r--r--deps/v8/src/heap/incremental-marking.h3
-rw-r--r--deps/v8/src/heap/local-heap-inl.h6
-rw-r--r--deps/v8/src/heap/local-heap.cc58
-rw-r--r--deps/v8/src/heap/local-heap.h19
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h4
-rw-r--r--deps/v8/src/heap/mark-compact.cc712
-rw-r--r--deps/v8/src/heap/mark-compact.h65
-rw-r--r--deps/v8/src/heap/marking-visitor-inl.h85
-rw-r--r--deps/v8/src/heap/marking-visitor.h53
-rw-r--r--deps/v8/src/heap/memory-allocator.cc87
-rw-r--r--deps/v8/src/heap/memory-allocator.h18
-rw-r--r--deps/v8/src/heap/memory-chunk-layout.cc7
-rw-r--r--deps/v8/src/heap/memory-chunk-layout.h2
-rw-r--r--deps/v8/src/heap/memory-chunk.h1
-rw-r--r--deps/v8/src/heap/memory-measurement-inl.h6
-rw-r--r--deps/v8/src/heap/memory-measurement.cc32
-rw-r--r--deps/v8/src/heap/memory-measurement.h5
-rw-r--r--deps/v8/src/heap/new-spaces.cc2
-rw-r--r--deps/v8/src/heap/object-stats.cc11
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h6
-rw-r--r--deps/v8/src/heap/objects-visiting.cc8
-rw-r--r--deps/v8/src/heap/objects-visiting.h3
-rw-r--r--deps/v8/src/heap/paged-spaces.cc61
-rw-r--r--deps/v8/src/heap/paged-spaces.h6
-rw-r--r--deps/v8/src/heap/parallel-work-item.h32
-rw-r--r--deps/v8/src/heap/read-only-heap-inl.h4
-rw-r--r--deps/v8/src/heap/read-only-heap.cc35
-rw-r--r--deps/v8/src/heap/read-only-heap.h9
-rw-r--r--deps/v8/src/heap/read-only-spaces.cc21
-rw-r--r--deps/v8/src/heap/read-only-spaces.h7
-rw-r--r--deps/v8/src/heap/safepoint.cc21
-rw-r--r--deps/v8/src/heap/safepoint.h32
-rw-r--r--deps/v8/src/heap/scavenger-inl.h18
-rw-r--r--deps/v8/src/heap/scavenger.cc212
-rw-r--r--deps/v8/src/heap/scavenger.h101
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc43
-rw-r--r--deps/v8/src/heap/spaces.h6
-rw-r--r--deps/v8/src/heap/third-party/heap-api.h5
-rw-r--r--deps/v8/src/heap/weak-object-worklists.cc172
-rw-r--r--deps/v8/src/heap/weak-object-worklists.h90
-rw-r--r--deps/v8/src/ic/DIR_METADATA11
-rw-r--r--deps/v8/src/ic/OWNERS2
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc261
-rw-r--r--deps/v8/src/ic/call-optimization.cc11
-rw-r--r--deps/v8/src/ic/handler-configuration.cc4
-rw-r--r--deps/v8/src/ic/ic-inl.h3
-rw-r--r--deps/v8/src/ic/ic.cc31
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc18
-rw-r--r--deps/v8/src/init/DIR_METADATA11
-rw-r--r--deps/v8/src/init/OWNERS2
-rw-r--r--deps/v8/src/init/bootstrapper.cc57
-rw-r--r--deps/v8/src/init/heap-symbols.h5
-rw-r--r--deps/v8/src/init/isolate-allocator.cc20
-rw-r--r--deps/v8/src/init/isolate-allocator.h10
-rw-r--r--deps/v8/src/inspector/DIR_METADATA11
-rw-r--r--deps/v8/src/inspector/OWNERS2
-rw-r--r--deps/v8/src/inspector/injected-script.cc31
-rw-r--r--deps/v8/src/inspector/remote-object-id.cc81
-rw-r--r--deps/v8/src/inspector/remote-object-id.h21
-rw-r--r--deps/v8/src/inspector/string-16.h2
-rw-r--r--deps/v8/src/inspector/v8-console.cc1
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc4
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc4
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.cc2
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc80
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.h9
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc12
-rw-r--r--deps/v8/src/inspector/value-mirror.cc23
-rw-r--r--deps/v8/src/interpreter/DIR_METADATA11
-rw-r--r--deps/v8/src/interpreter/OWNERS2
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.h5
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc9
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h6
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h6
-rw-r--r--deps/v8/src/interpreter/bytecode-array-random-iterator.h6
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.h3
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc48
-rw-r--r--deps/v8/src/interpreter/bytecode-label.h4
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.h5
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.cc8
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.h10
-rw-r--r--deps/v8/src/interpreter/bytecode-register.cc15
-rw-r--r--deps/v8/src/interpreter/bytecodes.h1
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h5
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.h4
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc87
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h14
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc139
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.cc199
-rw-r--r--deps/v8/src/interpreter/interpreter.cc30
-rw-r--r--deps/v8/src/interpreter/interpreter.h4
-rw-r--r--deps/v8/src/json/DIR_METADATA11
-rw-r--r--deps/v8/src/json/OWNERS2
-rw-r--r--deps/v8/src/json/json-parser.cc18
-rw-r--r--deps/v8/src/json/json-stringifier.cc6
-rw-r--r--deps/v8/src/libplatform/DIR_METADATA11
-rw-r--r--deps/v8/src/libplatform/OWNERS2
-rw-r--r--deps/v8/src/libplatform/default-job.cc27
-rw-r--r--deps/v8/src/libplatform/default-job.h20
-rw-r--r--deps/v8/src/libsampler/DIR_METADATA11
-rw-r--r--deps/v8/src/libsampler/OWNERS2
-rw-r--r--deps/v8/src/libsampler/sampler.cc4
-rw-r--r--deps/v8/src/logging/counters-definitions.h11
-rw-r--r--deps/v8/src/logging/counters.cc17
-rw-r--r--deps/v8/src/logging/counters.h6
-rw-r--r--deps/v8/src/logging/log.cc111
-rw-r--r--deps/v8/src/logging/log.h2
-rw-r--r--deps/v8/src/logging/metrics.h7
-rw-r--r--deps/v8/src/numbers/DIR_METADATA11
-rw-r--r--deps/v8/src/numbers/OWNERS2
-rw-r--r--deps/v8/src/objects/DIR_METADATA11
-rw-r--r--deps/v8/src/objects/OWNERS2
-rw-r--r--deps/v8/src/objects/all-objects-inl.h104
-rw-r--r--deps/v8/src/objects/allocation-site-inl.h2
-rw-r--r--deps/v8/src/objects/allocation-site.h2
-rw-r--r--deps/v8/src/objects/api-callbacks-inl.h3
-rw-r--r--deps/v8/src/objects/api-callbacks.h3
-rw-r--r--deps/v8/src/objects/arguments-inl.h2
-rw-r--r--deps/v8/src/objects/arguments.h2
-rw-r--r--deps/v8/src/objects/backing-store.cc78
-rw-r--r--deps/v8/src/objects/backing-store.h4
-rw-r--r--deps/v8/src/objects/bigint-inl.h24
-rw-r--r--deps/v8/src/objects/bigint.cc14
-rw-r--r--deps/v8/src/objects/bigint.h2
-rw-r--r--deps/v8/src/objects/bigint.tq21
-rw-r--r--deps/v8/src/objects/cell-inl.h2
-rw-r--r--deps/v8/src/objects/cell.h3
-rw-r--r--deps/v8/src/objects/class-definitions-tq-deps-inl.h44
-rw-r--r--deps/v8/src/objects/code-inl.h218
-rw-r--r--deps/v8/src/objects/code-kind.h93
-rw-r--r--deps/v8/src/objects/code.cc76
-rw-r--r--deps/v8/src/objects/code.h340
-rw-r--r--deps/v8/src/objects/compilation-cache-table-inl.h (renamed from deps/v8/src/objects/compilation-cache-inl.h)9
-rw-r--r--deps/v8/src/objects/compilation-cache-table.cc447
-rw-r--r--deps/v8/src/objects/compilation-cache-table.h (renamed from deps/v8/src/objects/compilation-cache.h)62
-rw-r--r--deps/v8/src/objects/compressed-slots-inl.h18
-rw-r--r--deps/v8/src/objects/compressed-slots.h16
-rw-r--r--deps/v8/src/objects/contexts-inl.h24
-rw-r--r--deps/v8/src/objects/contexts.h7
-rw-r--r--deps/v8/src/objects/data-handler-inl.h2
-rw-r--r--deps/v8/src/objects/data-handler.h3
-rw-r--r--deps/v8/src/objects/debug-objects-inl.h2
-rw-r--r--deps/v8/src/objects/debug-objects.h2
-rw-r--r--deps/v8/src/objects/descriptor-array-inl.h23
-rw-r--r--deps/v8/src/objects/descriptor-array.h16
-rw-r--r--deps/v8/src/objects/descriptor-array.tq8
-rw-r--r--deps/v8/src/objects/dictionary-inl.h20
-rw-r--r--deps/v8/src/objects/dictionary.h40
-rw-r--r--deps/v8/src/objects/elements.cc21
-rw-r--r--deps/v8/src/objects/elements.h4
-rw-r--r--deps/v8/src/objects/embedder-data-array-inl.h5
-rw-r--r--deps/v8/src/objects/embedder-data-array.h3
-rw-r--r--deps/v8/src/objects/embedder-data-slot-inl.h90
-rw-r--r--deps/v8/src/objects/embedder-data-slot.h28
-rw-r--r--deps/v8/src/objects/feedback-cell-inl.h13
-rw-r--r--deps/v8/src/objects/feedback-cell.h7
-rw-r--r--deps/v8/src/objects/feedback-vector-inl.h202
-rw-r--r--deps/v8/src/objects/feedback-vector.cc381
-rw-r--r--deps/v8/src/objects/feedback-vector.h217
-rw-r--r--deps/v8/src/objects/feedback-vector.tq18
-rw-r--r--deps/v8/src/objects/field-index-inl.h8
-rw-r--r--deps/v8/src/objects/field-index.h2
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h32
-rw-r--r--deps/v8/src/objects/fixed-array.h31
-rw-r--r--deps/v8/src/objects/foreign-inl.h16
-rw-r--r--deps/v8/src/objects/foreign.h5
-rw-r--r--deps/v8/src/objects/foreign.tq3
-rw-r--r--deps/v8/src/objects/free-space-inl.h2
-rw-r--r--deps/v8/src/objects/free-space.h3
-rw-r--r--deps/v8/src/objects/hash-table-inl.h17
-rw-r--r--deps/v8/src/objects/hash-table.h18
-rw-r--r--deps/v8/src/objects/heap-number-inl.h2
-rw-r--r--deps/v8/src/objects/heap-number.h2
-rw-r--r--deps/v8/src/objects/heap-object.h16
-rw-r--r--deps/v8/src/objects/internal-index.h8
-rw-r--r--deps/v8/src/objects/intl-objects.cc69
-rw-r--r--deps/v8/src/objects/intl-objects.tq153
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h85
-rw-r--r--deps/v8/src/objects/js-array-buffer.cc1
-rw-r--r--deps/v8/src/objects/js-array-buffer.h24
-rw-r--r--deps/v8/src/objects/js-array-buffer.tq5
-rw-r--r--deps/v8/src/objects/js-break-iterator-inl.h2
-rw-r--r--deps/v8/src/objects/js-break-iterator.h2
-rw-r--r--deps/v8/src/objects/js-break-iterator.tq17
-rw-r--r--deps/v8/src/objects/js-collator-inl.h2
-rw-r--r--deps/v8/src/objects/js-collator.h2
-rw-r--r--deps/v8/src/objects/js-collator.tq12
-rw-r--r--deps/v8/src/objects/js-collection-inl.h14
-rw-r--r--deps/v8/src/objects/js-collection-iterator-inl.h26
-rw-r--r--deps/v8/src/objects/js-collection-iterator.h2
-rw-r--r--deps/v8/src/objects/js-collection.h2
-rw-r--r--deps/v8/src/objects/js-date-time-format-inl.h2
-rw-r--r--deps/v8/src/objects/js-date-time-format.h2
-rw-r--r--deps/v8/src/objects/js-date-time-format.tq23
-rw-r--r--deps/v8/src/objects/js-display-names-inl.h2
-rw-r--r--deps/v8/src/objects/js-display-names.h2
-rw-r--r--deps/v8/src/objects/js-display-names.tq19
-rw-r--r--deps/v8/src/objects/js-function-inl.h23
-rw-r--r--deps/v8/src/objects/js-function.cc52
-rw-r--r--deps/v8/src/objects/js-function.h8
-rw-r--r--deps/v8/src/objects/js-function.tq34
-rw-r--r--deps/v8/src/objects/js-generator-inl.h2
-rw-r--r--deps/v8/src/objects/js-generator.h2
-rw-r--r--deps/v8/src/objects/js-list-format-inl.h2
-rw-r--r--deps/v8/src/objects/js-list-format.cc55
-rw-r--r--deps/v8/src/objects/js-list-format.h2
-rw-r--r--deps/v8/src/objects/js-list-format.tq19
-rw-r--r--deps/v8/src/objects/js-locale-inl.h2
-rw-r--r--deps/v8/src/objects/js-locale.h2
-rw-r--r--deps/v8/src/objects/js-locale.tq10
-rw-r--r--deps/v8/src/objects/js-number-format-inl.h2
-rw-r--r--deps/v8/src/objects/js-number-format.cc80
-rw-r--r--deps/v8/src/objects/js-number-format.h2
-rw-r--r--deps/v8/src/objects/js-number-format.tq13
-rw-r--r--deps/v8/src/objects/js-objects-inl.h46
-rw-r--r--deps/v8/src/objects/js-objects.cc303
-rw-r--r--deps/v8/src/objects/js-objects.h23
-rw-r--r--deps/v8/src/objects/js-objects.tq31
-rw-r--r--deps/v8/src/objects/js-plural-rules-inl.h2
-rw-r--r--deps/v8/src/objects/js-plural-rules.h2
-rw-r--r--deps/v8/src/objects/js-plural-rules.tq19
-rw-r--r--deps/v8/src/objects/js-promise-inl.h2
-rw-r--r--deps/v8/src/objects/js-promise.h2
-rw-r--r--deps/v8/src/objects/js-proxy-inl.h2
-rw-r--r--deps/v8/src/objects/js-proxy.h2
-rw-r--r--deps/v8/src/objects/js-regexp-inl.h2
-rw-r--r--deps/v8/src/objects/js-regexp-string-iterator-inl.h2
-rw-r--r--deps/v8/src/objects/js-regexp-string-iterator.h2
-rw-r--r--deps/v8/src/objects/js-regexp.cc7
-rw-r--r--deps/v8/src/objects/js-regexp.h9
-rw-r--r--deps/v8/src/objects/js-regexp.tq1
-rw-r--r--deps/v8/src/objects/js-relative-time-format-inl.h2
-rw-r--r--deps/v8/src/objects/js-relative-time-format.cc9
-rw-r--r--deps/v8/src/objects/js-relative-time-format.h2
-rw-r--r--deps/v8/src/objects/js-relative-time-format.tq19
-rw-r--r--deps/v8/src/objects/js-segment-iterator-inl.h2
-rw-r--r--deps/v8/src/objects/js-segment-iterator.h2
-rw-r--r--deps/v8/src/objects/js-segment-iterator.tq16
-rw-r--r--deps/v8/src/objects/js-segmenter-inl.h2
-rw-r--r--deps/v8/src/objects/js-segmenter.h2
-rw-r--r--deps/v8/src/objects/js-segmenter.tq18
-rw-r--r--deps/v8/src/objects/js-segments-inl.h2
-rw-r--r--deps/v8/src/objects/js-segments.h2
-rw-r--r--deps/v8/src/objects/js-segments.tq16
-rw-r--r--deps/v8/src/objects/js-weak-refs-inl.h2
-rw-r--r--deps/v8/src/objects/js-weak-refs.h2
-rw-r--r--deps/v8/src/objects/keys.cc283
-rw-r--r--deps/v8/src/objects/keys.h89
-rw-r--r--deps/v8/src/objects/layout-descriptor-inl.h7
-rw-r--r--deps/v8/src/objects/layout-descriptor.cc16
-rw-r--r--deps/v8/src/objects/literal-objects-inl.h10
-rw-r--r--deps/v8/src/objects/literal-objects.cc5
-rw-r--r--deps/v8/src/objects/literal-objects.h6
-rw-r--r--deps/v8/src/objects/lookup-cache.h3
-rw-r--r--deps/v8/src/objects/lookup.cc119
-rw-r--r--deps/v8/src/objects/map-inl.h57
-rw-r--r--deps/v8/src/objects/map-updater.cc26
-rw-r--r--deps/v8/src/objects/map.cc155
-rw-r--r--deps/v8/src/objects/map.h21
-rw-r--r--deps/v8/src/objects/maybe-object-inl.h11
-rw-r--r--deps/v8/src/objects/maybe-object.h6
-rw-r--r--deps/v8/src/objects/microtask-inl.h2
-rw-r--r--deps/v8/src/objects/microtask.h2
-rw-r--r--deps/v8/src/objects/module-inl.h18
-rw-r--r--deps/v8/src/objects/module.cc38
-rw-r--r--deps/v8/src/objects/module.h8
-rw-r--r--deps/v8/src/objects/name-inl.h2
-rw-r--r--deps/v8/src/objects/name.h2
-rw-r--r--deps/v8/src/objects/object-list-macros.h2
-rw-r--r--deps/v8/src/objects/object-macros-undef.h6
-rw-r--r--deps/v8/src/objects/object-macros.h94
-rw-r--r--deps/v8/src/objects/objects-body-descriptors-inl.h10
-rw-r--r--deps/v8/src/objects/objects-definitions.h3
-rw-r--r--deps/v8/src/objects/objects-inl.h61
-rw-r--r--deps/v8/src/objects/objects.cc677
-rw-r--r--deps/v8/src/objects/objects.h22
-rw-r--r--deps/v8/src/objects/oddball-inl.h2
-rw-r--r--deps/v8/src/objects/oddball.h7
-rw-r--r--deps/v8/src/objects/oddball.tq6
-rw-r--r--deps/v8/src/objects/ordered-hash-table-inl.h64
-rw-r--r--deps/v8/src/objects/ordered-hash-table.cc415
-rw-r--r--deps/v8/src/objects/ordered-hash-table.h187
-rw-r--r--deps/v8/src/objects/ordered-hash-table.tq3
-rw-r--r--deps/v8/src/objects/primitive-heap-object-inl.h3
-rw-r--r--deps/v8/src/objects/primitive-heap-object.h3
-rw-r--r--deps/v8/src/objects/promise-inl.h2
-rw-r--r--deps/v8/src/objects/promise.h2
-rw-r--r--deps/v8/src/objects/property-array-inl.h4
-rw-r--r--deps/v8/src/objects/property-array.h2
-rw-r--r--deps/v8/src/objects/property-descriptor-object-inl.h2
-rw-r--r--deps/v8/src/objects/property-descriptor-object.h2
-rw-r--r--deps/v8/src/objects/property-descriptor.cc2
-rw-r--r--deps/v8/src/objects/property.cc2
-rw-r--r--deps/v8/src/objects/prototype-info-inl.h2
-rw-r--r--deps/v8/src/objects/prototype-info.h2
-rw-r--r--deps/v8/src/objects/prototype.h4
-rw-r--r--deps/v8/src/objects/regexp-match-info.h3
-rw-r--r--deps/v8/src/objects/scope-info.cc22
-rw-r--r--deps/v8/src/objects/script-inl.h2
-rw-r--r--deps/v8/src/objects/script.h5
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h149
-rw-r--r--deps/v8/src/objects/shared-function-info.cc26
-rw-r--r--deps/v8/src/objects/shared-function-info.h31
-rw-r--r--deps/v8/src/objects/shared-function-info.tq1
-rw-r--r--deps/v8/src/objects/slots-inl.h12
-rw-r--r--deps/v8/src/objects/slots.h12
-rw-r--r--deps/v8/src/objects/source-text-module-inl.h29
-rw-r--r--deps/v8/src/objects/source-text-module.cc33
-rw-r--r--deps/v8/src/objects/source-text-module.h18
-rw-r--r--deps/v8/src/objects/source-text-module.tq10
-rw-r--r--deps/v8/src/objects/stack-frame-info-inl.h2
-rw-r--r--deps/v8/src/objects/stack-frame-info.cc3
-rw-r--r--deps/v8/src/objects/stack-frame-info.h2
-rw-r--r--deps/v8/src/objects/string-comparator.cc2
-rw-r--r--deps/v8/src/objects/string-comparator.h9
-rw-r--r--deps/v8/src/objects/string-inl.h164
-rw-r--r--deps/v8/src/objects/string-table.cc45
-rw-r--r--deps/v8/src/objects/string-table.h4
-rw-r--r--deps/v8/src/objects/string.cc83
-rw-r--r--deps/v8/src/objects/string.h73
-rw-r--r--deps/v8/src/objects/string.tq5
-rw-r--r--deps/v8/src/objects/struct-inl.h3
-rw-r--r--deps/v8/src/objects/struct.h3
-rw-r--r--deps/v8/src/objects/synthetic-module-inl.h27
-rw-r--r--deps/v8/src/objects/synthetic-module.cc3
-rw-r--r--deps/v8/src/objects/synthetic-module.h2
-rw-r--r--deps/v8/src/objects/tagged-field-inl.h12
-rw-r--r--deps/v8/src/objects/tagged-field.h12
-rw-r--r--deps/v8/src/objects/template-objects-inl.h2
-rw-r--r--deps/v8/src/objects/template-objects.h2
-rw-r--r--deps/v8/src/objects/templates-inl.h13
-rw-r--r--deps/v8/src/objects/templates.h5
-rw-r--r--deps/v8/src/objects/templates.tq (renamed from deps/v8/src/objects/template.tq)3
-rw-r--r--deps/v8/src/objects/torque-defined-classes-inl.h23
-rw-r--r--deps/v8/src/objects/torque-defined-classes.h25
-rw-r--r--deps/v8/src/objects/torque-defined-classes.tq17
-rw-r--r--deps/v8/src/objects/transitions-inl.h16
-rw-r--r--deps/v8/src/objects/transitions.cc7
-rw-r--r--deps/v8/src/objects/value-serializer.cc64
-rw-r--r--deps/v8/src/objects/value-serializer.h8
-rw-r--r--deps/v8/src/parsing/DIR_METADATA11
-rw-r--r--deps/v8/src/parsing/OWNERS2
-rw-r--r--deps/v8/src/parsing/parse-info.cc2
-rw-r--r--deps/v8/src/parsing/parse-info.h2
-rw-r--r--deps/v8/src/parsing/parser-base.h51
-rw-r--r--deps/v8/src/parsing/parser.cc236
-rw-r--r--deps/v8/src/parsing/parser.h16
-rw-r--r--deps/v8/src/parsing/rewriter.cc49
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc4
-rw-r--r--deps/v8/src/profiler/DIR_METADATA11
-rw-r--r--deps/v8/src/profiler/OWNERS2
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc69
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h26
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc13
-rw-r--r--deps/v8/src/profiler/heap-profiler.h10
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc83
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h15
-rw-r--r--deps/v8/src/profiler/profile-generator-inl.h7
-rw-r--r--deps/v8/src/profiler/profile-generator.cc198
-rw-r--r--deps/v8/src/profiler/profile-generator.h34
-rw-r--r--deps/v8/src/profiler/symbolizer.cc190
-rw-r--r--deps/v8/src/profiler/symbolizer.h44
-rw-r--r--deps/v8/src/regexp/DIR_METADATA11
-rw-r--r--deps/v8/src/regexp/OWNERS2
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc17
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h1
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc17
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h1
-rw-r--r--deps/v8/src/regexp/experimental/experimental-bytecode.h8
-rw-r--r--deps/v8/src/regexp/experimental/experimental-compiler.cc239
-rw-r--r--deps/v8/src/regexp/experimental/experimental-interpreter.cc176
-rw-r--r--deps/v8/src/regexp/experimental/experimental-interpreter.h18
-rw-r--r--deps/v8/src/regexp/experimental/experimental.cc220
-rw-r--r--deps/v8/src/regexp/experimental/experimental.h14
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc16
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h1
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc16
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h1
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc16
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h1
-rw-r--r--deps/v8/src/regexp/ppc/OWNERS1
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc20
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h1
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator.cc8
-rw-r--r--deps/v8/src/regexp/regexp-compiler.cc6
-rw-r--r--deps/v8/src/regexp/regexp-error.h1
-rw-r--r--deps/v8/src/regexp/regexp-interpreter.cc8
-rw-r--r--deps/v8/src/regexp/regexp-interpreter.h1
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc2
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h22
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc9
-rw-r--r--deps/v8/src/regexp/regexp-parser.h4
-rw-r--r--deps/v8/src/regexp/regexp-stack.cc17
-rw-r--r--deps/v8/src/regexp/regexp-stack.h15
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc7
-rw-r--r--deps/v8/src/regexp/regexp.cc83
-rw-r--r--deps/v8/src/regexp/regexp.h11
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc16
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h1
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc16
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h1
-rw-r--r--deps/v8/src/roots/DIR_METADATA11
-rw-r--r--deps/v8/src/roots/OWNERS2
-rw-r--r--deps/v8/src/roots/roots.h7
-rw-r--r--deps/v8/src/runtime/DIR_METADATA11
-rw-r--r--deps/v8/src/runtime/OWNERS2
-rw-r--r--deps/v8/src/runtime/runtime-array.cc5
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc10
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc53
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc2
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc9
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc9
-rw-r--r--deps/v8/src/runtime/runtime-object.cc13
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc17
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc3
-rw-r--r--deps/v8/src/runtime/runtime-test.cc29
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc94
-rw-r--r--deps/v8/src/runtime/runtime.h9
-rw-r--r--deps/v8/src/snapshot/DIR_METADATA11
-rw-r--r--deps/v8/src/snapshot/OWNERS2
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc136
-rw-r--r--deps/v8/src/snapshot/code-serializer.h26
-rw-r--r--deps/v8/src/snapshot/context-deserializer.cc37
-rw-r--r--deps/v8/src/snapshot/context-deserializer.h8
-rw-r--r--deps/v8/src/snapshot/context-serializer.cc75
-rw-r--r--deps/v8/src/snapshot/context-serializer.h7
-rw-r--r--deps/v8/src/snapshot/deserializer-allocator.cc217
-rw-r--r--deps/v8/src/snapshot/deserializer-allocator.h104
-rw-r--r--deps/v8/src/snapshot/deserializer.cc961
-rw-r--r--deps/v8/src/snapshot/deserializer.h178
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.cc240
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.h157
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-empty.cc16
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-file-writer.cc120
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-file-writer.h42
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc4
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc3
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc4
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc4
-rw-r--r--deps/v8/src/snapshot/object-deserializer.cc44
-rw-r--r--deps/v8/src/snapshot/object-deserializer.h4
-rw-r--r--deps/v8/src/snapshot/read-only-deserializer.cc23
-rw-r--r--deps/v8/src/snapshot/read-only-deserializer.h9
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.cc53
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.h11
-rw-r--r--deps/v8/src/snapshot/references.h177
-rw-r--r--deps/v8/src/snapshot/roots-serializer.cc3
-rw-r--r--deps/v8/src/snapshot/roots-serializer.h6
-rw-r--r--deps/v8/src/snapshot/serializer-allocator.cc167
-rw-r--r--deps/v8/src/snapshot/serializer-allocator.h78
-rw-r--r--deps/v8/src/snapshot/serializer-deserializer.cc31
-rw-r--r--deps/v8/src/snapshot/serializer-deserializer.h99
-rw-r--r--deps/v8/src/snapshot/serializer.cc720
-rw-r--r--deps/v8/src/snapshot/serializer.h176
-rw-r--r--deps/v8/src/snapshot/snapshot-data.cc31
-rw-r--r--deps/v8/src/snapshot/snapshot-data.h30
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.h30
-rw-r--r--deps/v8/src/snapshot/snapshot-utils.cc15
-rw-r--r--deps/v8/src/snapshot/snapshot-utils.h2
-rw-r--r--deps/v8/src/snapshot/snapshot.cc63
-rw-r--r-- deps/v8/src/snapshot/startup-deserializer.cc | 65
-rw-r--r-- deps/v8/src/snapshot/startup-deserializer.h | 10
-rw-r--r-- deps/v8/src/snapshot/startup-serializer.cc | 76
-rw-r--r-- deps/v8/src/snapshot/startup-serializer.h | 16
-rw-r--r-- deps/v8/src/strings/DIR_METADATA | 11
-rw-r--r-- deps/v8/src/strings/OWNERS | 2
-rw-r--r-- deps/v8/src/strings/char-predicates-inl.h | 96
-rw-r--r-- deps/v8/src/strings/string-stream.cc | 2
-rw-r--r-- deps/v8/src/strings/unicode-inl.h | 19
-rw-r--r-- deps/v8/src/strings/unicode.h | 2
-rw-r--r-- deps/v8/src/torque/ast.h | 82
-rw-r--r-- deps/v8/src/torque/cc-generator.cc | 460
-rw-r--r-- deps/v8/src/torque/cc-generator.h | 46
-rw-r--r-- deps/v8/src/torque/constants.h | 14
-rw-r--r-- deps/v8/src/torque/csa-generator.cc | 83
-rw-r--r-- deps/v8/src/torque/csa-generator.h | 76
-rw-r--r-- deps/v8/src/torque/declarable.h | 46
-rw-r--r-- deps/v8/src/torque/declarations.cc | 1
-rw-r--r-- deps/v8/src/torque/global-context.h | 14
-rw-r--r-- deps/v8/src/torque/implementation-visitor.cc | 773
-rw-r--r-- deps/v8/src/torque/implementation-visitor.h | 48
-rw-r--r-- deps/v8/src/torque/instance-type-generator.cc | 2
-rw-r--r-- deps/v8/src/torque/instructions.h | 56
-rw-r--r-- deps/v8/src/torque/runtime-macro-shims.h | 36
-rw-r--r-- deps/v8/src/torque/torque-code-generator.cc | 60
-rw-r--r-- deps/v8/src/torque/torque-code-generator.h | 93
-rw-r--r-- deps/v8/src/torque/torque-compiler.cc | 6
-rw-r--r-- deps/v8/src/torque/torque-parser.cc | 64
-rw-r--r-- deps/v8/src/torque/type-visitor.cc | 32
-rw-r--r-- deps/v8/src/torque/types.cc | 252
-rw-r--r-- deps/v8/src/torque/types.h | 47
-rw-r--r-- deps/v8/src/tracing/DIR_METADATA | 11
-rw-r--r-- deps/v8/src/tracing/OWNERS | 2
-rw-r--r-- deps/v8/src/tracing/trace-categories.h | 1
-rw-r--r-- deps/v8/src/trap-handler/DIR_METADATA | 11
-rw-r--r-- deps/v8/src/trap-handler/OWNERS | 2
-rw-r--r-- deps/v8/src/trap-handler/handler-outside.cc | 12
-rw-r--r-- deps/v8/src/trap-handler/trap-handler.h | 21
-rw-r--r-- deps/v8/src/utils/DIR_METADATA | 11
-rw-r--r-- deps/v8/src/utils/OWNERS | 2
-rw-r--r-- deps/v8/src/utils/bit-vector.cc | 2
-rw-r--r-- deps/v8/src/utils/bit-vector.h | 2
-rw-r--r-- deps/v8/src/utils/identity-map.cc | 144
-rw-r--r-- deps/v8/src/utils/identity-map.h | 71
-rw-r--r-- deps/v8/src/utils/locked-queue-inl.h | 6
-rw-r--r-- deps/v8/src/utils/locked-queue.h | 2
-rw-r--r-- deps/v8/src/utils/utils.h | 73
-rw-r--r-- deps/v8/src/wasm/DIR_METADATA | 11
-rw-r--r-- deps/v8/src/wasm/OWNERS | 2
-rw-r--r-- deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 108
-rw-r--r-- deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 123
-rw-r--r-- deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 122
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-assembler.cc | 23
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-assembler.h | 59
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-compiler.cc | 242
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-register.h | 4
-rw-r--r-- deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 76
-rw-r--r-- deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 101
-rw-r--r-- deps/v8/src/wasm/baseline/ppc/OWNERS | 1
-rw-r--r-- deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 60
-rw-r--r-- deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 60
-rw-r--r-- deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 127
-rw-r--r-- deps/v8/src/wasm/c-api.cc | 12
-rw-r--r-- deps/v8/src/wasm/decoder.h | 189
-rw-r--r-- deps/v8/src/wasm/function-body-decoder-impl.h | 1221
-rw-r--r-- deps/v8/src/wasm/function-body-decoder.cc | 44
-rw-r--r-- deps/v8/src/wasm/function-body-decoder.h | 4
-rw-r--r-- deps/v8/src/wasm/function-compiler.cc | 22
-rw-r--r-- deps/v8/src/wasm/function-compiler.h | 18
-rw-r--r-- deps/v8/src/wasm/graph-builder-interface.cc | 53
-rw-r--r-- deps/v8/src/wasm/memory-tracing.cc | 41
-rw-r--r-- deps/v8/src/wasm/memory-tracing.h | 8
-rw-r--r-- deps/v8/src/wasm/module-compiler.cc | 901
-rw-r--r-- deps/v8/src/wasm/module-compiler.h | 3
-rw-r--r-- deps/v8/src/wasm/module-decoder.cc | 81
-rw-r--r-- deps/v8/src/wasm/module-instantiate.cc | 62
-rw-r--r-- deps/v8/src/wasm/streaming-decoder.cc | 13
-rw-r--r-- deps/v8/src/wasm/value-type.h | 133
-rw-r--r-- deps/v8/src/wasm/wasm-code-manager.cc | 50
-rw-r--r-- deps/v8/src/wasm/wasm-code-manager.h | 25
-rw-r--r-- deps/v8/src/wasm/wasm-constants.h | 5
-rw-r--r-- deps/v8/src/wasm/wasm-debug-evaluate.cc | 8
-rw-r--r-- deps/v8/src/wasm/wasm-debug-evaluate.h | 6
-rw-r--r-- deps/v8/src/wasm/wasm-debug.cc | 81
-rw-r--r-- deps/v8/src/wasm/wasm-debug.h | 4
-rw-r--r-- deps/v8/src/wasm/wasm-engine.cc | 5
-rw-r--r-- deps/v8/src/wasm/wasm-engine.h | 4
-rw-r--r-- deps/v8/src/wasm/wasm-external-refs.cc | 60
-rw-r--r-- deps/v8/src/wasm/wasm-js.cc | 785
-rw-r--r-- deps/v8/src/wasm/wasm-js.h | 4
-rw-r--r-- deps/v8/src/wasm/wasm-module-builder.cc | 2
-rw-r--r-- deps/v8/src/wasm/wasm-module-builder.h | 4
-rw-r--r-- deps/v8/src/wasm/wasm-module.cc | 11
-rw-r--r-- deps/v8/src/wasm/wasm-module.h | 47
-rw-r--r-- deps/v8/src/wasm/wasm-objects-inl.h | 25
-rw-r--r-- deps/v8/src/wasm/wasm-objects.cc | 73
-rw-r--r-- deps/v8/src/wasm/wasm-objects.h | 32
-rw-r--r-- deps/v8/src/wasm/wasm-objects.tq | 13
-rw-r--r-- deps/v8/src/wasm/wasm-opcodes-inl.h | 49
-rw-r--r-- deps/v8/src/wasm/wasm-opcodes.h | 126
-rw-r--r-- deps/v8/src/wasm/wasm-result.h | 9
-rw-r--r-- deps/v8/src/wasm/wasm-serialization.cc | 14
-rw-r--r-- deps/v8/src/wasm/wasm-value.h | 11
-rw-r--r-- deps/v8/src/zone/zone-containers.h | 4
1111 files changed, 41766 insertions, 31134 deletions
diff --git a/deps/v8/src/DIR_METADATA b/deps/v8/src/DIR_METADATA
new file mode 100644
index 0000000000..2f8dbbcf45
--- /dev/null
+++ b/deps/v8/src/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS
index 3e21b6ea36..e5e3de50a3 100644
--- a/deps/v8/src/OWNERS
+++ b/deps/v8/src/OWNERS
@@ -1,5 +1,3 @@
per-file *DEPS=file:../COMMON_OWNERS
per-file intl-*=file:../INTL_OWNERS
per-file *-intl*=file:../INTL_OWNERS
-
-# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/api/DIR_METADATA b/deps/v8/src/api/DIR_METADATA
new file mode 100644
index 0000000000..a27ea1b53a
--- /dev/null
+++ b/deps/v8/src/api/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>API"
+}
\ No newline at end of file
diff --git a/deps/v8/src/api/OWNERS b/deps/v8/src/api/OWNERS
index 4e36be20e8..519588070b 100644
--- a/deps/v8/src/api/OWNERS
+++ b/deps/v8/src/api/OWNERS
@@ -6,5 +6,3 @@ leszeks@chromium.org
mlippautz@chromium.org
mslekova@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>API
diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc
index e21dbd0eee..f8f660ea15 100644
--- a/deps/v8/src/api/api-natives.cc
+++ b/deps/v8/src/api/api-natives.cc
@@ -361,7 +361,8 @@ bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo info,
if (!new_target.IsJSFunction()) return false;
JSFunction fun = JSFunction::cast(new_target);
- if (fun.shared().function_data() != info.constructor()) return false;
+ if (fun.shared().function_data(kAcquireLoad) != info.constructor())
+ return false;
if (info.immutable_proto()) return false;
return fun.context().native_context() == isolate->raw_native_context();
}
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index 11a9dce9f0..a29747da62 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -14,6 +14,7 @@
#include "include/v8-cppgc.h"
#include "include/v8-fast-api-calls.h"
#include "include/v8-profiler.h"
+#include "include/v8-unwinder-state.h"
#include "include/v8-util.h"
#include "src/api/api-inl.h"
#include "src/api/api-natives.h"
@@ -86,6 +87,7 @@
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/stack-frame-info-inl.h"
+#include "src/objects/synthetic-module-inl.h"
#include "src/objects/templates.h"
#include "src/objects/value-serializer.h"
#include "src/parsing/parse-info.h"
@@ -100,6 +102,7 @@
#include "src/regexp/regexp-utils.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/code-serializer.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/snapshot/startup-serializer.h" // For SerializedHandleChecker.
#include "src/strings/char-predicates-inl.h"
@@ -919,9 +922,9 @@ void ResourceConstraints::ConfigureDefaultsFromHeapSize(
i::Heap::GenerationSizesFromHeapSize(maximum_heap_size_in_bytes,
&young_generation, &old_generation);
set_max_young_generation_size_in_bytes(
- i::Max(young_generation, i::Heap::MinYoungGenerationSize()));
+ std::max(young_generation, i::Heap::MinYoungGenerationSize()));
set_max_old_generation_size_in_bytes(
- i::Max(old_generation, i::Heap::MinOldGenerationSize()));
+ std::max(old_generation, i::Heap::MinOldGenerationSize()));
if (initial_heap_size_in_bytes > 0) {
i::Heap::GenerationSizesFromHeapSize(initial_heap_size_in_bytes,
&young_generation, &old_generation);
@@ -931,7 +934,7 @@ void ResourceConstraints::ConfigureDefaultsFromHeapSize(
}
if (i::kPlatformRequiresCodeRange) {
set_code_range_size_in_bytes(
- i::Min(i::kMaximalCodeRangeSize, maximum_heap_size_in_bytes));
+ std::min(i::kMaximalCodeRangeSize, maximum_heap_size_in_bytes));
}
}
@@ -946,8 +949,8 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
if (virtual_memory_limit > 0 && i::kPlatformRequiresCodeRange) {
set_code_range_size_in_bytes(
- i::Min(i::kMaximalCodeRangeSize,
- static_cast<size_t>(virtual_memory_limit / 8)));
+ std::min(i::kMaximalCodeRangeSize,
+ static_cast<size_t>(virtual_memory_limit / 8)));
}
}
@@ -991,42 +994,6 @@ i::Address* V8::GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
return result.location();
}
-// static
-i::Address* i::JSMemberBase::New(v8::Isolate* isolate, i::Address* object_slot,
- i::Address** this_slot) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, JSMemberBase, New);
-#ifdef DEBUG
- Utils::ApiCheck((object_slot != nullptr), "i::JSMemberBase::New",
- "the object must be not null");
-#endif
- i::Handle<i::Object> result = i_isolate->global_handles()->CreateTraced(
- *object_slot, reinterpret_cast<i::Address*>(this_slot),
- false /* no destructor */);
-#ifdef VERIFY_HEAP
- if (i::FLAG_verify_heap) {
- i::Object(*object_slot).ObjectVerify(i_isolate);
- }
-#endif // VERIFY_HEAP
- return result.location();
-}
-
-// static
-void i::JSMemberBase::Delete(i::Address* object) {
- i::GlobalHandles::DestroyTraced(object);
-}
-
-// static
-void i::JSMemberBase::Copy(const i::Address* const* from_slot,
- i::Address** to_slot) {
- i::GlobalHandles::CopyTracedGlobal(from_slot, to_slot);
-}
-
-// static
-void i::JSMemberBase::Move(i::Address** from_slot, i::Address** to_slot) {
- i::GlobalHandles::MoveTracedGlobal(from_slot, to_slot);
-}
-
i::Address* V8::CopyGlobalReference(i::Address* from) {
i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(from);
return result.location();
@@ -1560,7 +1527,7 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
isolate, info,
i::handle(*FromCData(isolate, c_function->GetTypeInfo()), isolate));
}
- info->set_call_code(*obj);
+ info->set_call_code(*obj, kReleaseStore);
}
namespace {
@@ -2038,6 +2005,17 @@ void ObjectTemplate::SetImmutableProto() {
self->set_immutable_proto(true);
}
+bool ObjectTemplate::IsCodeLike() {
+ return Utils::OpenHandle(this)->code_like();
+}
+
+void ObjectTemplate::SetCodeLike() {
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ self->set_code_like(true);
+}
+
// --- S c r i p t s ---
// Internally, UnboundScript is a SharedFunctionInfo, and Script is a
@@ -2271,7 +2249,9 @@ Local<String> Module::GetModuleRequest(int i) const {
i::Handle<i::SourceTextModule>::cast(self)->info().module_requests(),
isolate);
CHECK_LT(i, module_requests->length());
- return ToApiHandle<String>(i::handle(module_requests->get(i), isolate));
+ i::Handle<i::ModuleRequest> module_request(
+ i::ModuleRequest::cast(module_requests->get(i)), isolate);
+ return ToApiHandle<String>(i::handle(module_request->specifier(), isolate));
}
Location Module::GetModuleRequestLocation(int i) const {
@@ -2329,6 +2309,15 @@ int Module::ScriptId() {
return ToApiHandle<UnboundScript>(sfi)->GetId();
}
+bool Module::IsGraphAsync() const {
+ Utils::ApiCheck(
+ GetStatus() >= kInstantiated, "v8::Module::IsGraphAsync",
+ "v8::Module::IsGraphAsync must be used on an instantiated module");
+ i::Handle<i::Module> self = Utils::OpenHandle(this);
+ auto isolate = reinterpret_cast<i::Isolate*>(self->GetIsolate());
+ return self->IsGraphAsync(isolate);
+}
+
bool Module::IsSourceTextModule() const {
return Utils::OpenHandle(this)->IsSourceTextModule();
}
@@ -2646,12 +2635,15 @@ void ScriptCompiler::ScriptStreamingTask::Run() { data_->task->Run(); }
ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
Isolate* v8_isolate, StreamedSource* source, CompileOptions options) {
- if (!i::FLAG_script_streaming) {
- return nullptr;
- }
// We don't support other compile options on streaming background compiles.
// TODO(rmcilroy): remove CompileOptions from the API.
CHECK(options == ScriptCompiler::kNoCompileOptions);
+ return StartStreaming(v8_isolate, source);
+}
+
+ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreaming(
+ Isolate* v8_isolate, StreamedSource* source) {
+ if (!i::FLAG_script_streaming) return nullptr;
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i::ScriptStreamingData* data = source->impl();
std::unique_ptr<i::BackgroundCompileTask> task =
@@ -3663,6 +3655,12 @@ MaybeLocal<Uint32> Value::ToUint32(Local<Context> context) const {
RETURN_ESCAPED(result);
}
+i::Address i::DecodeExternalPointerImpl(const i::Isolate* isolate,
+ i::ExternalPointer_t encoded_pointer,
+ ExternalPointerTag tag) {
+ return i::DecodeExternalPointer(isolate, encoded_pointer, tag);
+}
+
i::Isolate* i::IsolateFromNeverReadOnlySpaceObject(i::Address obj) {
return i::GetIsolateFromWritableObject(i::HeapObject::cast(i::Object(obj)));
}
@@ -4436,7 +4434,8 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(
accumulator.GetKeys(static_cast<i::GetKeysConversion>(key_conversion));
DCHECK(self->map().EnumLength() == i::kInvalidEnumCacheSentinel ||
self->map().EnumLength() == 0 ||
- self->map().instance_descriptors().enum_cache().keys() != *value);
+ self->map().instance_descriptors(kRelaxedLoad).enum_cache().keys() !=
+ *value);
auto result = isolate->factory()->NewJSArrayWithElements(value);
RETURN_ESCAPED(Utils::ToLocal(result));
}
@@ -4941,7 +4940,8 @@ MaybeLocal<Object> Function::NewInstanceWithSideEffectType(
CHECK(self->IsJSFunction() &&
i::JSFunction::cast(*self).shared().IsApiFunction());
i::Object obj =
- i::JSFunction::cast(*self).shared().get_api_func_data().call_code();
+ i::JSFunction::cast(*self).shared().get_api_func_data().call_code(
+ kAcquireLoad);
if (obj.IsCallHandlerInfo()) {
i::CallHandlerInfo handler_info = i::CallHandlerInfo::cast(obj);
if (!handler_info.IsSideEffectFreeCallHandlerInfo()) {
@@ -4955,7 +4955,8 @@ MaybeLocal<Object> Function::NewInstanceWithSideEffectType(
i::Execution::New(isolate, self, self, argc, args), &result);
if (should_set_has_no_side_effect) {
i::Object obj =
- i::JSFunction::cast(*self).shared().get_api_func_data().call_code();
+ i::JSFunction::cast(*self).shared().get_api_func_data().call_code(
+ kAcquireLoad);
if (obj.IsCallHandlerInfo()) {
i::CallHandlerInfo handler_info = i::CallHandlerInfo::cast(obj);
if (has_pending_exception) {
@@ -5127,6 +5128,18 @@ Local<v8::Value> Function::GetBoundFunction() const {
return v8::Undefined(reinterpret_cast<v8::Isolate*>(self->GetIsolate()));
}
+MaybeLocal<String> v8::Function::FunctionProtoToString(Local<Context> context) {
+ PREPARE_FOR_EXECUTION(context, Function, FunctionProtoToString, String);
+ auto self = Utils::OpenHandle(this);
+ Local<Value> result;
+ has_pending_exception = !ToLocal<Value>(
+ i::Execution::CallBuiltin(isolate, isolate->function_to_string(), self, 0,
+ nullptr),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(String);
+ RETURN_ESCAPED(Local<String>::Cast(result));
+}
+
int Name::GetIdentityHash() {
auto self = Utils::OpenHandle(this);
return static_cast<int>(self->Hash());
@@ -5532,7 +5545,8 @@ String::ExternalStringResource* String::GetExternalStringResourceSlow() const {
if (i::StringShape(str).IsExternalTwoByte()) {
internal::Isolate* isolate = I::GetIsolateForHeapSandbox(str.ptr());
internal::Address value = I::ReadExternalPointerField(
- isolate, str.ptr(), I::kStringResourceOffset);
+ isolate, str.ptr(), I::kStringResourceOffset,
+ internal::kExternalStringResourceTag);
return reinterpret_cast<String::ExternalStringResource*>(value);
}
return nullptr;
@@ -5556,7 +5570,8 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBaseSlow(
i::StringShape(str).IsExternalTwoByte()) {
internal::Isolate* isolate = I::GetIsolateForHeapSandbox(string);
internal::Address value =
- I::ReadExternalPointerField(isolate, string, I::kStringResourceOffset);
+ I::ReadExternalPointerField(isolate, string, I::kStringResourceOffset,
+ internal::kExternalStringResourceTag);
resource = reinterpret_cast<ExternalStringResourceBase*>(value);
}
return resource;
@@ -5876,6 +5891,10 @@ void V8::GetSharedMemoryStatistics(SharedMemoryStatistics* statistics) {
i::ReadOnlyHeap::PopulateReadOnlySpaceStatistics(statistics);
}
+void V8::SetIsCrossOriginIsolated() {
+ i::FLAG_harmony_sharedarraybuffer = true;
+}
+
template <typename ObjectType>
struct InvokeBootstrapper;
@@ -6118,12 +6137,6 @@ v8::Isolate* Context::GetIsolate() {
return reinterpret_cast<Isolate*>(env->GetIsolate());
}
-v8::MicrotaskQueue* Context::GetMicrotaskQueue() {
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- CHECK(env->IsNativeContext());
- return i::Handle<i::NativeContext>::cast(env)->microtask_queue();
-}
-
v8::Local<v8::Object> Context::Global() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
@@ -6839,6 +6852,7 @@ REGEXP_FLAG_ASSERT_EQ(kIgnoreCase);
REGEXP_FLAG_ASSERT_EQ(kMultiline);
REGEXP_FLAG_ASSERT_EQ(kSticky);
REGEXP_FLAG_ASSERT_EQ(kUnicode);
+REGEXP_FLAG_ASSERT_EQ(kLinear);
#undef REGEXP_FLAG_ASSERT_EQ
v8::RegExp::Flags v8::RegExp::GetFlags() const {
@@ -7015,10 +7029,11 @@ i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object table_obj,
i::DisallowHeapAllocation no_gc;
i::Oddball the_hole = i::ReadOnlyRoots(isolate).the_hole_value();
for (int i = offset; i < capacity; ++i) {
- i::Object key = table->KeyAt(i);
+ i::InternalIndex entry(i);
+ i::Object key = table->KeyAt(entry);
if (key == the_hole) continue;
if (collect_keys) result->set(result_index++, key);
- if (collect_values) result->set(result_index++, table->ValueAt(i));
+ if (collect_values) result->set(result_index++, table->ValueAt(entry));
}
}
DCHECK_GE(max_length, result_index);
@@ -7118,7 +7133,8 @@ i::Handle<i::JSArray> SetAsArray(i::Isolate* isolate, i::Object table_obj,
i::DisallowHeapAllocation no_gc;
i::Oddball the_hole = i::ReadOnlyRoots(isolate).the_hole_value();
for (int i = offset; i < capacity; ++i) {
- i::Object key = table->KeyAt(i);
+ i::InternalIndex entry(i);
+ i::Object key = table->KeyAt(entry);
if (key == the_hole) continue;
result->set(result_index++, key);
if (collect_key_values) result->set(result_index++, key);
@@ -7314,6 +7330,7 @@ CompiledWasmModule::CompiledWasmModule(
}
OwnedBuffer CompiledWasmModule::Serialize() {
+ TRACE_EVENT0("v8.wasm", "wasm.SerializeModule");
i::wasm::WasmSerializer wasm_serializer(native_module_.get());
size_t buffer_size = wasm_serializer.GetSerializedNativeModuleSize();
std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
@@ -7665,7 +7682,7 @@ Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
i::Handle<i::JSArrayBufferView> self = Utils::OpenHandle(this);
size_t byte_offset = self->byte_offset();
- size_t bytes_to_copy = i::Min(byte_length, self->byte_length());
+ size_t bytes_to_copy = std::min(byte_length, self->byte_length());
if (bytes_to_copy) {
i::DisallowHeapAllocation no_gc;
i::Isolate* isolate = self->GetIsolate();
@@ -8995,6 +9012,14 @@ void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
*length_in_bytes = code_range.size();
}
+void Isolate::GetEmbeddedCodeRange(const void** start,
+ size_t* length_in_bytes) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::EmbeddedData d = i::EmbeddedData::FromBlob(isolate);
+ *start = reinterpret_cast<const void*>(d.code());
+ *length_in_bytes = d.code_size();
+}
+
JSEntryStubs Isolate::GetJSEntryStubs() {
JSEntryStubs entry_stubs;
@@ -9050,6 +9075,9 @@ CALLBACK_SETTER(AllowCodeGenerationFromStringsCallback,
CALLBACK_SETTER(ModifyCodeGenerationFromStringsCallback,
ModifyCodeGenerationFromStringsCallback,
modify_code_gen_callback)
+CALLBACK_SETTER(ModifyCodeGenerationFromStringsCallback,
+ ModifyCodeGenerationFromStringsCallback2,
+ modify_code_gen_callback2)
CALLBACK_SETTER(AllowWasmCodeGenerationCallback,
AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback)
@@ -9199,6 +9227,14 @@ void v8::Isolate::LocaleConfigurationChangeNotification() {
#endif // V8_INTL_SUPPORT
}
+bool v8::Object::IsCodeLike(v8::Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, Object, IsCodeLike);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ i::HandleScope scope(i_isolate);
+ return Utils::OpenHandle(this)->IsCodeLike(i_isolate);
+}
+
// static
std::unique_ptr<MicrotaskQueue> MicrotaskQueue::New(Isolate* isolate,
MicrotasksPolicy policy) {
@@ -9827,7 +9863,7 @@ void debug::ForceGarbageCollection(
v8::Isolate* isolate,
v8::EmbedderHeapTracer::EmbedderStackState embedder_stack_state) {
i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- heap->SetEmbedderStackStateForNextFinalizaton(embedder_stack_state);
+ heap->SetEmbedderStackStateForNextFinalization(embedder_stack_state);
isolate->LowMemoryNotification();
}
@@ -9943,6 +9979,10 @@ int debug::WasmScript::CodeOffset() const {
i::wasm::NativeModule* native_module = script->wasm_native_module();
const i::wasm::WasmModule* module = native_module->module();
+ // If the module contains at least one function, the code offset must have
+ // been initialized, and it cannot be zero.
+ DCHECK_IMPLIES(module->num_declared_functions > 0,
+ module->code.offset() != 0);
return module->code.offset();
}
@@ -10289,6 +10329,12 @@ debug::PostponeInterruptsScope::PostponeInterruptsScope(v8::Isolate* isolate)
debug::PostponeInterruptsScope::~PostponeInterruptsScope() = default;
+debug::DisableBreakScope::DisableBreakScope(v8::Isolate* isolate)
+ : scope_(std::make_unique<i::DisableBreak>(
+ reinterpret_cast<i::Isolate*>(isolate)->debug())) {}
+
+debug::DisableBreakScope::~DisableBreakScope() = default;
+
Local<String> CpuProfileNode::GetFunctionName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
i::Isolate* isolate = node->isolate();
@@ -10695,24 +10741,27 @@ void CpuProfiler::SetUsePreciseSampling(bool use_precise_sampling) {
use_precise_sampling);
}
-void CpuProfiler::StartProfiling(Local<String> title,
- CpuProfilingOptions options) {
- reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
+CpuProfilingStatus CpuProfiler::StartProfiling(Local<String> title,
+ CpuProfilingOptions options) {
+ return reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
*Utils::OpenHandle(*title), options);
}
-void CpuProfiler::StartProfiling(Local<String> title, bool record_samples) {
+CpuProfilingStatus CpuProfiler::StartProfiling(Local<String> title,
+ bool record_samples) {
CpuProfilingOptions options(
kLeafNodeLineNumbers,
record_samples ? CpuProfilingOptions::kNoSampleLimit : 0);
- reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
+ return reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
*Utils::OpenHandle(*title), options);
}
-void CpuProfiler::StartProfiling(Local<String> title, CpuProfilingMode mode,
- bool record_samples, unsigned max_samples) {
+CpuProfilingStatus CpuProfiler::StartProfiling(Local<String> title,
+ CpuProfilingMode mode,
+ bool record_samples,
+ unsigned max_samples) {
CpuProfilingOptions options(mode, record_samples ? max_samples : 0);
- reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
+ return reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
*Utils::OpenHandle(*title), options);
}
@@ -11004,6 +11053,12 @@ void HeapProfiler::RemoveBuildEmbedderGraphCallback(
callback, data);
}
+void HeapProfiler::SetGetDetachednessCallback(GetDetachednessCallback callback,
+ void* data) {
+ reinterpret_cast<i::HeapProfiler*>(this)->SetGetDetachednessCallback(callback,
+ data);
+}
+
void EmbedderHeapTracer::SetStackStart(void* stack_start) {
CHECK(isolate_);
reinterpret_cast<i::Isolate*>(isolate_)->global_handles()->SetStackStart(
@@ -11032,7 +11087,7 @@ void EmbedderHeapTracer::GarbageCollectionForTesting(
CHECK(isolate_);
CHECK(i::FLAG_expose_gc);
i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap();
- heap->SetEmbedderStackStateForNextFinalizaton(stack_state);
+ heap->SetEmbedderStackStateForNextFinalization(stack_state);
heap->PreciseCollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting,
kGCCallbackFlagForced);
@@ -11061,7 +11116,7 @@ void EmbedderHeapTracer::DecreaseAllocatedSize(size_t bytes) {
}
void EmbedderHeapTracer::RegisterEmbedderReference(
- const TracedReferenceBase<v8::Data>& ref) {
+ const BasicTracedReference<v8::Data>& ref) {
if (ref.IsEmpty()) return;
i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap();
@@ -11119,6 +11174,33 @@ CFunction::CFunction(const void* address, const CFunctionInfo* type_info)
}
}
+RegisterState::RegisterState()
+ : pc(nullptr), sp(nullptr), fp(nullptr), lr(nullptr) {}
+RegisterState::~RegisterState() = default;
+
+RegisterState::RegisterState(const RegisterState& other) V8_NOEXCEPT {
+ *this = other;
+}
+
+RegisterState& RegisterState::operator=(const RegisterState& other)
+ V8_NOEXCEPT {
+ if (&other != this) {
+ pc = other.pc;
+ sp = other.sp;
+ fp = other.fp;
+ lr = other.lr;
+ if (other.callee_saved) {
+ // Make a deep copy if {other.callee_saved} is non-null.
+ callee_saved =
+ std::make_unique<CalleeSavedRegisters>(*(other.callee_saved));
+ } else {
+ // Otherwise, set {callee_saved} to null to match {other}.
+ callee_saved.reset();
+ }
+ }
+ return *this;
+}
+
namespace internal {
const size_t HandleScopeImplementer::kEnteredContextsOffset =
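For illustration only, not part of this commit's diff: among the api.cc changes above is a new public Isolate::GetEmbeddedCodeRange() next to the existing GetCodeRange(). A minimal embedder-side sketch of calling both; DumpCodeRanges is a hypothetical helper, and the isolate is assumed to be already created and initialized (platform and isolate setup omitted).

#include <cstddef>
#include <cstdio>

#include "v8.h"  // the public embedder header (include/v8.h in this tree)

// Hypothetical helper: prints both ranges for an already-initialized isolate.
void DumpCodeRanges(v8::Isolate* isolate) {
  void* code_start = nullptr;
  size_t code_size = 0;
  isolate->GetCodeRange(&code_start, &code_size);

  const void* embedded_start = nullptr;
  size_t embedded_size = 0;
  isolate->GetEmbeddedCodeRange(&embedded_start, &embedded_size);

  std::printf("code range:        %p (%zu bytes)\n", code_start, code_size);
  std::printf("embedded builtins: %p (%zu bytes)\n", embedded_start,
              embedded_size);
}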
diff --git a/deps/v8/src/asmjs/DIR_METADATA b/deps/v8/src/asmjs/DIR_METADATA
new file mode 100644
index 0000000000..3b428d9660
--- /dev/null
+++ b/deps/v8/src/asmjs/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>WebAssembly"
+}
\ No newline at end of file
diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS
index 16b08f3b3b..c400f97de0 100644
--- a/deps/v8/src/asmjs/OWNERS
+++ b/deps/v8/src/asmjs/OWNERS
@@ -1,5 +1,3 @@
ahaas@chromium.org
clemensb@chromium.org
titzer@chromium.org
-
-# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/ast/DIR_METADATA b/deps/v8/src/ast/DIR_METADATA
new file mode 100644
index 0000000000..165380ae4f
--- /dev/null
+++ b/deps/v8/src/ast/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Parser"
+}
\ No newline at end of file
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
index 089db4c252..1da57bd30d 100644
--- a/deps/v8/src/ast/OWNERS
+++ b/deps/v8/src/ast/OWNERS
@@ -6,5 +6,3 @@ littledan@chromium.org
marja@chromium.org
neis@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Parser
diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
index b583b5e421..8c9318bfe7 100644
--- a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
+++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
@@ -54,10 +54,10 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) {
// Private fields have their key and value present in
// instance_members_initializer_function, so they will
// already have been visited.
- if (prop->value()->IsFunctionLiteral()) {
- Visit(prop->value());
- } else {
+ if (prop->kind() == ClassLiteralProperty::Kind::FIELD) {
CheckVisited(prop->value());
+ } else {
+ Visit(prop->value());
}
}
ZonePtrList<ClassLiteral::Property>* props = expr->public_members();
@@ -67,7 +67,8 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) {
// Public fields with computed names have their key
// and value present in instance_members_initializer_function, so they will
// already have been visited.
- if (prop->is_computed_name() && !prop->value()->IsFunctionLiteral()) {
+ if (prop->is_computed_name() &&
+ prop->kind() == ClassLiteralProperty::Kind::FIELD) {
if (!prop->key()->IsLiteral()) {
CheckVisited(prop->key());
}
diff --git a/deps/v8/src/ast/ast-source-ranges.h b/deps/v8/src/ast/ast-source-ranges.h
index 1e96ec4c27..1b42a055dd 100644
--- a/deps/v8/src/ast/ast-source-ranges.h
+++ b/deps/v8/src/ast/ast-source-ranges.h
@@ -47,7 +47,6 @@ struct SourceRange {
V(Block) \
V(CaseClause) \
V(Conditional) \
- V(Expression) \
V(FunctionLiteral) \
V(IfStatement) \
V(IterationStatement) \
@@ -282,24 +281,6 @@ class NaryOperationSourceRanges final : public AstNodeSourceRanges {
ZoneVector<SourceRange> ranges_;
};
-class ExpressionSourceRanges final : public AstNodeSourceRanges {
- public:
- explicit ExpressionSourceRanges(const SourceRange& right_range)
- : right_range_(right_range) {}
-
- SourceRange GetRange(SourceRangeKind kind) override {
- DCHECK(HasRange(kind));
- return right_range_;
- }
-
- bool HasRange(SourceRangeKind kind) override {
- return kind == SourceRangeKind::kRight;
- }
-
- private:
- SourceRange right_range_;
-};
-
class SuspendSourceRanges final : public ContinuationSourceRanges {
public:
explicit SuspendSourceRanges(int32_t continuation_position)
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 598096ba10..b5a39b22cf 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -27,12 +27,14 @@
#include "src/ast/ast-value-factory.h"
+#include "src/base/hashmap-entry.h"
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/heap/factory-inl.h"
#include "src/heap/local-factory-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
+#include "src/objects/string.h"
#include "src/strings/char-predicates-inl.h"
#include "src/strings/string-hasher.h"
#include "src/utils/utils-inl.h"
@@ -113,9 +115,7 @@ uint16_t AstRawString::FirstCharacter() const {
return *c;
}
-bool AstRawString::Compare(void* a, void* b) {
- const AstRawString* lhs = static_cast<AstRawString*>(a);
- const AstRawString* rhs = static_cast<AstRawString*>(b);
+bool AstRawString::Compare(const AstRawString* lhs, const AstRawString* rhs) {
DCHECK_EQ(lhs->Hash(), rhs->Hash());
if (lhs->length() != rhs->length()) return false;
@@ -194,14 +194,17 @@ Handle<String> AstConsString::AllocateFlat(LocalIsolate* isolate) const {
->NewRawOneByteString(result_length, AllocationType::kOld)
.ToHandleChecked();
DisallowHeapAllocation no_gc;
- uint8_t* dest = result->GetChars(no_gc) + result_length;
+ uint8_t* dest =
+ result->GetChars(no_gc, SharedStringAccessGuardIfNeeded::NotNeeded()) +
+ result_length;
for (const AstConsString::Segment* current = &segment_; current != nullptr;
current = current->next) {
int length = current->string->length();
dest -= length;
CopyChars(dest, current->string->raw_data(), length);
}
- DCHECK_EQ(dest, result->GetChars(no_gc));
+ DCHECK_EQ(dest, result->GetChars(
+ no_gc, SharedStringAccessGuardIfNeeded::NotNeeded()));
return result;
}
@@ -210,7 +213,9 @@ Handle<String> AstConsString::AllocateFlat(LocalIsolate* isolate) const {
->NewRawTwoByteString(result_length, AllocationType::kOld)
.ToHandleChecked();
DisallowHeapAllocation no_gc;
- uint16_t* dest = result->GetChars(no_gc) + result_length;
+ uint16_t* dest =
+ result->GetChars(no_gc, SharedStringAccessGuardIfNeeded::NotNeeded()) +
+ result_length;
for (const AstConsString::Segment* current = &segment_; current != nullptr;
current = current->next) {
int length = current->string->length();
@@ -223,7 +228,8 @@ Handle<String> AstConsString::AllocateFlat(LocalIsolate* isolate) const {
length);
}
}
- DCHECK_EQ(dest, result->GetChars(no_gc));
+ DCHECK_EQ(dest, result->GetChars(
+ no_gc, SharedStringAccessGuardIfNeeded::NotNeeded()));
return result;
}
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
@@ -248,7 +254,7 @@ std::forward_list<const AstRawString*> AstConsString::ToRawStrings() const {
AstStringConstants::AstStringConstants(Isolate* isolate, uint64_t hash_seed)
: zone_(isolate->allocator(), ZONE_NAME),
- string_table_(AstRawString::Compare),
+ string_table_(),
hash_seed_(hash_seed) {
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
#define F(name, str) \
@@ -262,16 +268,13 @@ AstStringConstants::AstStringConstants(Isolate* isolate, uint64_t hash_seed)
/* The Handle returned by the factory is located on the roots */ \
/* array, not on the temporary HandleScope, so this is safe. */ \
name##_string_->set_string(isolate->factory()->name##_string()); \
- base::HashMap::Entry* entry = \
- string_table_.InsertNew(name##_string_, name##_string_->Hash()); \
- DCHECK_NULL(entry->value); \
- entry->value = reinterpret_cast<void*>(1); \
+ string_table_.InsertNew(name##_string_, name##_string_->Hash()); \
}
AST_STRING_CONSTANTS(F)
#undef F
}
-AstRawString* AstValueFactory::GetOneByteStringInternal(
+const AstRawString* AstValueFactory::GetOneByteStringInternal(
Vector<const uint8_t> literal) {
if (literal.length() == 1 && literal[0] < kMaxOneCharStringValue) {
int key = literal[0];
@@ -287,7 +290,7 @@ AstRawString* AstValueFactory::GetOneByteStringInternal(
return GetString(hash_field, true, literal);
}
-AstRawString* AstValueFactory::GetTwoByteStringInternal(
+const AstRawString* AstValueFactory::GetTwoByteStringInternal(
Vector<const uint16_t> literal) {
uint32_t hash_field = StringHasher::HashSequentialString<uint16_t>(
literal.begin(), literal.length(), hash_seed_);
@@ -295,7 +298,7 @@ AstRawString* AstValueFactory::GetTwoByteStringInternal(
}
const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
- AstRawString* result = nullptr;
+ const AstRawString* result = nullptr;
DisallowHeapAllocation no_gc;
String::FlatContent content = literal->GetFlatContent(no_gc);
if (content.IsOneByte()) {
@@ -348,27 +351,29 @@ template EXPORT_TEMPLATE_DEFINE(
template EXPORT_TEMPLATE_DEFINE(
V8_EXPORT_PRIVATE) void AstValueFactory::Internalize(LocalIsolate* isolate);
-AstRawString* AstValueFactory::GetString(uint32_t hash_field, bool is_one_byte,
- Vector<const byte> literal_bytes) {
+const AstRawString* AstValueFactory::GetString(
+ uint32_t hash_field, bool is_one_byte, Vector<const byte> literal_bytes) {
// literal_bytes here points to whatever the user passed, and this is OK
// because we use vector_compare (which checks the contents) to compare
// against the AstRawStrings which are in the string_table_. We should not
// return this AstRawString.
AstRawString key(is_one_byte, literal_bytes, hash_field);
- base::HashMap::Entry* entry = string_table_.LookupOrInsert(&key, key.Hash());
- if (entry->value == nullptr) {
- // Copy literal contents for later comparison.
- int length = literal_bytes.length();
- byte* new_literal_bytes = zone()->NewArray<byte>(length);
- memcpy(new_literal_bytes, literal_bytes.begin(), length);
- AstRawString* new_string = zone()->New<AstRawString>(
- is_one_byte, Vector<const byte>(new_literal_bytes, length), hash_field);
- CHECK_NOT_NULL(new_string);
- AddString(new_string);
- entry->key = new_string;
- entry->value = reinterpret_cast<void*>(1);
- }
- return reinterpret_cast<AstRawString*>(entry->key);
+ AstRawStringMap::Entry* entry = string_table_.LookupOrInsert(
+ &key, key.Hash(),
+ [&]() {
+ // Copy literal contents for later comparison.
+ int length = literal_bytes.length();
+ byte* new_literal_bytes = zone()->NewArray<byte>(length);
+ memcpy(new_literal_bytes, literal_bytes.begin(), length);
+ AstRawString* new_string = zone()->New<AstRawString>(
+ is_one_byte, Vector<const byte>(new_literal_bytes, length),
+ hash_field);
+ CHECK_NOT_NULL(new_string);
+ AddString(new_string);
+ return new_string;
+ },
+ [&]() { return base::NoHashMapValue(); });
+ return entry->key;
}
} // namespace internal
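For illustration only, not part of this commit's diff: the GetString() change above replaces "insert, then fill in the entry" with a LookupOrInsert that takes factory callbacks, so the key bytes are only copied into the zone when the lookup misses. A standalone sketch of the same pattern using the standard library, with a transparent comparator so the lookup key can stay a borrowed std::string_view; all names here are hypothetical.

#include <map>
#include <string>
#include <string_view>

// std::less<> is transparent, so find() accepts a string_view and no
// std::string is allocated on the hit path.
std::map<std::string, int, std::less<>> g_table;

int LookupOrInsert(std::string_view key, int value_if_missing) {
  auto it = g_table.find(key);
  if (it == g_table.end()) {
    // Miss: only now copy the borrowed key into owned storage.
    it = g_table.emplace(std::string(key), value_if_missing).first;
  }
  return it->second;
}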
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 1752498123..776b45a670 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -48,6 +48,8 @@ class Isolate;
class AstRawString final : public ZoneObject {
public:
+ static bool Compare(const AstRawString* a, const AstRawString* b);
+
bool IsEmpty() const { return literal_bytes_.length() == 0; }
int length() const {
return is_one_byte() ? literal_bytes_.length()
@@ -85,7 +87,6 @@ class AstRawString final : public ZoneObject {
friend Zone;
// Members accessed only by the AstValueFactory & related classes:
- static bool Compare(void* a, void* b);
AstRawString(bool is_one_byte, const Vector<const byte>& literal_bytes,
uint32_t hash_field)
: next_(nullptr),
@@ -205,12 +206,26 @@ class AstBigInt {
const char* bigint_;
};
+struct AstRawStringMapMatcher {
+ bool operator()(uint32_t hash1, uint32_t hash2,
+ const AstRawString* lookup_key,
+ const AstRawString* entry_key) const {
+ return hash1 == hash2 && AstRawString::Compare(lookup_key, entry_key);
+ }
+};
+
+using AstRawStringMap =
+ base::TemplateHashMapImpl<const AstRawString*, base::NoHashMapValue,
+ AstRawStringMapMatcher,
+ base::DefaultAllocationPolicy>;
+
// For generating constants.
#define AST_STRING_CONSTANTS(F) \
F(anonymous, "anonymous") \
F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \
F(as, "as") \
+ F(assert, "assert") \
F(async, "async") \
F(await, "await") \
F(bigint, "bigint") \
@@ -269,13 +284,11 @@ class AstStringConstants final {
#undef F
uint64_t hash_seed() const { return hash_seed_; }
- const base::CustomMatcherHashMap* string_table() const {
- return &string_table_;
- }
+ const AstRawStringMap* string_table() const { return &string_table_; }
private:
Zone zone_;
- base::CustomMatcherHashMap string_table_;
+ AstRawStringMap string_table_;
uint64_t hash_seed_;
#define F(name, str) AstRawString* name##_string_;
@@ -353,14 +366,14 @@ class AstValueFactory {
strings_ = nullptr;
strings_end_ = &strings_;
}
- V8_EXPORT_PRIVATE AstRawString* GetOneByteStringInternal(
+ V8_EXPORT_PRIVATE const AstRawString* GetOneByteStringInternal(
Vector<const uint8_t> literal);
- AstRawString* GetTwoByteStringInternal(Vector<const uint16_t> literal);
- AstRawString* GetString(uint32_t hash, bool is_one_byte,
- Vector<const byte> literal_bytes);
+ const AstRawString* GetTwoByteStringInternal(Vector<const uint16_t> literal);
+ const AstRawString* GetString(uint32_t hash, bool is_one_byte,
+ Vector<const byte> literal_bytes);
- // All strings are copied here, one after another (no zeroes inbetween).
- base::CustomMatcherHashMap string_table_;
+ // All strings are copied here.
+ AstRawStringMap string_table_;
AstRawString* strings_;
AstRawString** strings_end_;
@@ -372,7 +385,7 @@ class AstValueFactory {
// Caches one character lowercase strings (for minified code).
static const int kMaxOneCharStringValue = 128;
- AstRawString* one_character_strings_[kMaxOneCharStringValue];
+ const AstRawString* one_character_strings_[kMaxOneCharStringValue];
Zone* zone_;
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index b40cf83c82..e8c7796abc 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -223,12 +223,6 @@ bool FunctionLiteral::AllowsLazyCompilation() {
return scope()->AllowsLazyCompilation();
}
-bool FunctionLiteral::SafeToSkipArgumentsAdaptor() const {
- return language_mode() == LanguageMode::kStrict &&
- scope()->arguments() == nullptr &&
- scope()->rest_parameter() == nullptr;
-}
-
int FunctionLiteral::start_position() const {
return scope()->start_position();
}
@@ -438,7 +432,7 @@ int ObjectLiteral::InitDepthAndFlags() {
// literal with fast elements will be a waste of space.
uint32_t element_index = 0;
if (key->AsArrayIndex(&element_index)) {
- max_element_index = Max(element_index, max_element_index);
+ max_element_index = std::max(element_index, max_element_index);
elements++;
} else {
DCHECK(key->IsPropertyName());
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 4213c60f24..7b70181e6a 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -2160,18 +2160,6 @@ class FunctionLiteral final : public Expression {
return false;
}
- // We can safely skip the arguments adaptor frame setup even
- // in case of arguments mismatches for strict mode functions,
- // as long as there's
- //
- // 1. no use of the arguments object (either explicitly or
- // potentially implicitly via a direct eval() call), and
- // 2. rest parameters aren't being used in the function.
- //
- // See http://bit.ly/v8-faster-calls-with-arguments-mismatch
- // for the details here (https://crbug.com/v8/8895).
- bool SafeToSkipArgumentsAdaptor() const;
-
// Returns either name or inferred name as a cstring.
std::unique_ptr<char[]> GetDebugName() const;
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index 08fbe76102..3c9a5080ad 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -16,43 +16,78 @@ namespace internal {
bool SourceTextModuleDescriptor::AstRawStringComparer::operator()(
const AstRawString* lhs, const AstRawString* rhs) const {
+ return ThreeWayCompare(lhs, rhs) < 0;
+}
+
+int SourceTextModuleDescriptor::AstRawStringComparer::ThreeWayCompare(
+ const AstRawString* lhs, const AstRawString* rhs) {
// Fast path for equal pointers: a pointer is not strictly less than itself.
if (lhs == rhs) return false;
// Order by contents (ordering by hash is unstable across runs).
if (lhs->is_one_byte() != rhs->is_one_byte()) {
- return lhs->is_one_byte();
+ return lhs->is_one_byte() ? -1 : 1;
}
if (lhs->byte_length() != rhs->byte_length()) {
- return lhs->byte_length() < rhs->byte_length();
+ return lhs->byte_length() - rhs->byte_length();
}
- return memcmp(lhs->raw_data(), rhs->raw_data(), lhs->byte_length()) < 0;
+ return memcmp(lhs->raw_data(), rhs->raw_data(), lhs->byte_length());
+}
+
+bool SourceTextModuleDescriptor::ModuleRequestComparer::operator()(
+ const AstModuleRequest* lhs, const AstModuleRequest* rhs) const {
+ if (int specifier_comparison = AstRawStringComparer::ThreeWayCompare(
+ lhs->specifier(), rhs->specifier()))
+ return specifier_comparison < 0;
+
+ if (lhs->import_assertions()->size() != rhs->import_assertions()->size())
+ return (lhs->import_assertions()->size() <
+ rhs->import_assertions()->size());
+
+ auto lhsIt = lhs->import_assertions()->cbegin();
+ auto rhsIt = rhs->import_assertions()->cbegin();
+ for (; lhsIt != lhs->import_assertions()->cend(); ++lhsIt, ++rhsIt) {
+ if (int assertion_key_comparison =
+ AstRawStringComparer::ThreeWayCompare(lhsIt->first, rhsIt->first))
+ return assertion_key_comparison < 0;
+
+ if (int assertion_value_comparison = AstRawStringComparer::ThreeWayCompare(
+ lhsIt->second.first, rhsIt->second.first))
+ return assertion_value_comparison < 0;
+ }
+
+ return false;
}
void SourceTextModuleDescriptor::AddImport(
const AstRawString* import_name, const AstRawString* local_name,
- const AstRawString* module_request, const Scanner::Location loc,
+ const AstRawString* module_request,
+ const ImportAssertions* import_assertions, const Scanner::Location loc,
const Scanner::Location specifier_loc, Zone* zone) {
Entry* entry = zone->New<Entry>(loc);
entry->local_name = local_name;
entry->import_name = import_name;
- entry->module_request = AddModuleRequest(module_request, specifier_loc);
+ entry->module_request =
+ AddModuleRequest(module_request, import_assertions, specifier_loc, zone);
AddRegularImport(entry);
}
void SourceTextModuleDescriptor::AddStarImport(
const AstRawString* local_name, const AstRawString* module_request,
- const Scanner::Location loc, const Scanner::Location specifier_loc,
- Zone* zone) {
+ const ImportAssertions* import_assertions, const Scanner::Location loc,
+ const Scanner::Location specifier_loc, Zone* zone) {
Entry* entry = zone->New<Entry>(loc);
entry->local_name = local_name;
- entry->module_request = AddModuleRequest(module_request, specifier_loc);
+ entry->module_request =
+ AddModuleRequest(module_request, import_assertions, specifier_loc, zone);
AddNamespaceImport(entry, zone);
}
void SourceTextModuleDescriptor::AddEmptyImport(
- const AstRawString* module_request, const Scanner::Location specifier_loc) {
- AddModuleRequest(module_request, specifier_loc);
+ const AstRawString* module_request,
+ const ImportAssertions* import_assertions,
+ const Scanner::Location specifier_loc, Zone* zone) {
+ AddModuleRequest(module_request, import_assertions, specifier_loc, zone);
}
void SourceTextModuleDescriptor::AddExport(const AstRawString* local_name,
@@ -66,22 +101,26 @@ void SourceTextModuleDescriptor::AddExport(const AstRawString* local_name,
void SourceTextModuleDescriptor::AddExport(
const AstRawString* import_name, const AstRawString* export_name,
- const AstRawString* module_request, const Scanner::Location loc,
+ const AstRawString* module_request,
+ const ImportAssertions* import_assertions, const Scanner::Location loc,
const Scanner::Location specifier_loc, Zone* zone) {
DCHECK_NOT_NULL(import_name);
DCHECK_NOT_NULL(export_name);
Entry* entry = zone->New<Entry>(loc);
entry->export_name = export_name;
entry->import_name = import_name;
- entry->module_request = AddModuleRequest(module_request, specifier_loc);
+ entry->module_request =
+ AddModuleRequest(module_request, import_assertions, specifier_loc, zone);
AddSpecialExport(entry, zone);
}
void SourceTextModuleDescriptor::AddStarExport(
- const AstRawString* module_request, const Scanner::Location loc,
+ const AstRawString* module_request,
+ const ImportAssertions* import_assertions, const Scanner::Location loc,
const Scanner::Location specifier_loc, Zone* zone) {
Entry* entry = zone->New<Entry>(loc);
- entry->module_request = AddModuleRequest(module_request, specifier_loc);
+ entry->module_request =
+ AddModuleRequest(module_request, import_assertions, specifier_loc, zone);
AddSpecialExport(entry, zone);
}
@@ -95,6 +134,32 @@ Handle<PrimitiveHeapObject> ToStringOrUndefined(LocalIsolate* isolate,
} // namespace
template <typename LocalIsolate>
+Handle<ModuleRequest> SourceTextModuleDescriptor::AstModuleRequest::Serialize(
+ LocalIsolate* isolate) const {
+ // The import assertions will be stored in this array in the form:
+ // [key1, value1, location1, key2, value2, location2, ...]
+ Handle<FixedArray> import_assertions_array =
+ isolate->factory()->NewFixedArray(
+ static_cast<int>(import_assertions()->size() * 3));
+
+ int i = 0;
+ for (auto iter = import_assertions()->cbegin();
+ iter != import_assertions()->cend(); ++iter, i += 3) {
+ import_assertions_array->set(i, *iter->first->string());
+ import_assertions_array->set(i + 1, *iter->second.first->string());
+ import_assertions_array->set(i + 2,
+ Smi::FromInt(iter->second.second.beg_pos));
+ }
+ return v8::internal::ModuleRequest::New(isolate, specifier()->string(),
+ import_assertions_array);
+}
+template Handle<ModuleRequest>
+SourceTextModuleDescriptor::AstModuleRequest::Serialize(Isolate* isolate) const;
+template Handle<ModuleRequest>
+SourceTextModuleDescriptor::AstModuleRequest::Serialize(
+ LocalIsolate* isolate) const;
+
+template <typename LocalIsolate>
Handle<SourceTextModuleInfoEntry> SourceTextModuleDescriptor::Entry::Serialize(
LocalIsolate* isolate) const {
CHECK(Smi::IsValid(module_request)); // TODO(neis): Check earlier?
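For illustration only, not part of this commit's diff: AstModuleRequest::Serialize() above flattens the assertion map into (key, value, source-position) triples, so a consumer strides through the FixedArray by 3. A hypothetical in-tree helper reading only the Smi-encoded positions, assuming the usual internal headers and that the array length is a multiple of 3.

#include "src/handles/handles.h"
#include "src/objects/fixed-array.h"
#include "src/objects/smi.h"

namespace v8 {
namespace internal {

// Hypothetical: counts assertions recorded before a given source position.
int CountAssertionsBefore(Handle<FixedArray> assertions, int position) {
  int count = 0;
  // Layout: [key1, value1, position1, key2, value2, position2, ...]
  for (int i = 0; i < assertions->length(); i += 3) {
    if (Smi::ToInt(assertions->get(i + 2)) < position) count++;
  }
  return count;
}

}  // namespace internal
}  // namespace v8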
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index b57387b25f..f156d7a411 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -13,6 +13,7 @@ namespace internal {
class AstRawString;
+class ModuleRequest;
class SourceTextModuleInfo;
class SourceTextModuleInfoEntry;
class PendingCompilationErrorHandler;
@@ -26,6 +27,10 @@ class SourceTextModuleDescriptor : public ZoneObject {
regular_exports_(zone),
regular_imports_(zone) {}
+ using ImportAssertions =
+ ZoneMap<const AstRawString*,
+ std::pair<const AstRawString*, Scanner::Location>>;
+
// The following Add* methods are high-level convenience functions for use by
// the parser.
@@ -35,12 +40,14 @@ class SourceTextModuleDescriptor : public ZoneObject {
void AddImport(const AstRawString* import_name,
const AstRawString* local_name,
const AstRawString* module_request,
+ const ImportAssertions* import_assertions,
const Scanner::Location loc,
const Scanner::Location specifier_loc, Zone* zone);
// import * as x from "foo.js";
void AddStarImport(const AstRawString* local_name,
const AstRawString* module_request,
+ const ImportAssertions* import_assertions,
const Scanner::Location loc,
const Scanner::Location specifier_loc, Zone* zone);
@@ -48,7 +55,8 @@ class SourceTextModuleDescriptor : public ZoneObject {
// import {} from "foo.js";
// export {} from "foo.js"; (sic!)
void AddEmptyImport(const AstRawString* module_request,
- const Scanner::Location specifier_loc);
+ const ImportAssertions* import_assertions,
+ const Scanner::Location specifier_loc, Zone* zone);
// export {x};
// export {x as y};
@@ -64,11 +72,13 @@ class SourceTextModuleDescriptor : public ZoneObject {
void AddExport(const AstRawString* export_name,
const AstRawString* import_name,
const AstRawString* module_request,
+ const ImportAssertions* import_assertions,
const Scanner::Location loc,
const Scanner::Location specifier_loc, Zone* zone);
// export * from "foo.js";
void AddStarExport(const AstRawString* module_request,
+ const ImportAssertions* import_assertions,
const Scanner::Location loc,
const Scanner::Location specifier_loc, Zone* zone);
@@ -114,20 +124,55 @@ class SourceTextModuleDescriptor : public ZoneObject {
enum CellIndexKind { kInvalid, kExport, kImport };
static CellIndexKind GetCellIndexKind(int cell_index);
- struct ModuleRequest {
+ class AstModuleRequest : public ZoneObject {
+ public:
+ // TODO(v8:10958): Consider storing module request location here
+ // instead of using separate ModuleRequestLocation struct.
+ AstModuleRequest(const AstRawString* specifier,
+ const ImportAssertions* import_assertions)
+ : specifier_(specifier), import_assertions_(import_assertions) {}
+
+ template <typename LocalIsolate>
+ Handle<v8::internal::ModuleRequest> Serialize(LocalIsolate* isolate) const;
+
+ const AstRawString* specifier() const { return specifier_; }
+ const ImportAssertions* import_assertions() const {
+ return import_assertions_;
+ }
+
+ private:
+ const AstRawString* specifier_;
+ const ImportAssertions* import_assertions_;
+ };
+
+ struct ModuleRequestLocation {
+ // The index at which we will place the request in SourceTextModuleInfo's
+ // module_requests FixedArray.
int index;
+
+ // The JS source code position of the request, used for reporting errors.
int position;
- ModuleRequest(int index, int position) : index(index), position(position) {}
+
+ ModuleRequestLocation(int index, int position)
+ : index(index), position(position) {}
};
// Custom content-based comparer for the below maps, to keep them stable
// across parses.
struct V8_EXPORT_PRIVATE AstRawStringComparer {
bool operator()(const AstRawString* lhs, const AstRawString* rhs) const;
+ static int ThreeWayCompare(const AstRawString* lhs,
+ const AstRawString* rhs);
+ };
+
+ struct V8_EXPORT_PRIVATE ModuleRequestComparer {
+ bool operator()(const AstModuleRequest* lhs,
+ const AstModuleRequest* rhs) const;
};
using ModuleRequestMap =
- ZoneMap<const AstRawString*, ModuleRequest, AstRawStringComparer>;
+ ZoneMap<const AstModuleRequest*, ModuleRequestLocation,
+ ModuleRequestComparer>;
using RegularExportMap =
ZoneMultimap<const AstRawString*, Entry*, AstRawStringComparer>;
using RegularImportMap =
@@ -224,13 +269,15 @@ class SourceTextModuleDescriptor : public ZoneObject {
void AssignCellIndices();
int AddModuleRequest(const AstRawString* specifier,
- Scanner::Location specifier_loc) {
+ const ImportAssertions* import_assertions,
+ Scanner::Location specifier_loc, Zone* zone) {
DCHECK_NOT_NULL(specifier);
int module_requests_count = static_cast<int>(module_requests_.size());
auto it = module_requests_
- .insert(std::make_pair(specifier,
- ModuleRequest(module_requests_count,
- specifier_loc.beg_pos)))
+ .insert(std::make_pair(
+ zone->New<AstModuleRequest>(specifier, import_assertions),
+ ModuleRequestLocation(module_requests_count,
+ specifier_loc.beg_pos)))
.first;
return it->second.index;
}
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 20dca56cc4..e53d9c9e6e 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -258,6 +258,7 @@ void CallPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
Print("/");
if (node->flags() & RegExp::kGlobal) Print("g");
if (node->flags() & RegExp::kIgnoreCase) Print("i");
+ if (node->flags() & RegExp::kLinear) Print("l");
if (node->flags() & RegExp::kMultiline) Print("m");
if (node->flags() & RegExp::kUnicode) Print("u");
if (node->flags() & RegExp::kSticky) Print("y");
@@ -1163,6 +1164,7 @@ void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
EmbeddedVector<char, 128> buf;
if (node->flags() & RegExp::kGlobal) buf[i++] = 'g';
if (node->flags() & RegExp::kIgnoreCase) buf[i++] = 'i';
+ if (node->flags() & RegExp::kLinear) buf[i++] = 'l';
if (node->flags() & RegExp::kMultiline) buf[i++] = 'm';
if (node->flags() & RegExp::kUnicode) buf[i++] = 'u';
if (node->flags() & RegExp::kSticky) buf[i++] = 'y';
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index a5f4523670..e731d4c46a 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -705,8 +705,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
void SetDefaults();
- void set_scope_info(Handle<ScopeInfo> scope_info);
-
friend class DeclarationScope;
friend class ClassScope;
friend class ScopeTestHelper;
diff --git a/deps/v8/src/base/DIR_METADATA b/deps/v8/src/base/DIR_METADATA
new file mode 100644
index 0000000000..2f8dbbcf45
--- /dev/null
+++ b/deps/v8/src/base/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript"
+}
\ No newline at end of file
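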
diff --git a/deps/v8/src/base/OWNERS b/deps/v8/src/base/OWNERS
index 67dcc1cd98..8fcbc9e047 100644
--- a/deps/v8/src/base/OWNERS
+++ b/deps/v8/src/base/OWNERS
@@ -1,5 +1,3 @@
clemensb@chromium.org
ishell@chromium.org
mlippautz@chromium.org
-
-# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/base/bounded-page-allocator.h b/deps/v8/src/base/bounded-page-allocator.h
index d09aecee05..1c8c846711 100644
--- a/deps/v8/src/base/bounded-page-allocator.h
+++ b/deps/v8/src/base/bounded-page-allocator.h
@@ -29,6 +29,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
BoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
size_t size, size_t allocate_page_size);
+ BoundedPageAllocator(const BoundedPageAllocator&) = delete;
+ BoundedPageAllocator& operator=(const BoundedPageAllocator&) = delete;
~BoundedPageAllocator() override = default;
// These functions are not inlined to avoid https://crbug.com/v8/8275.
@@ -75,8 +77,6 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
const size_t commit_page_size_;
v8::PageAllocator* const page_allocator_;
v8::base::RegionAllocator region_allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(BoundedPageAllocator);
};
} // namespace base
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index ad287c9290..2bfbe1ba32 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -207,6 +207,10 @@ constexpr int kReturnAddressStackSlotCount =
// PPC has large (64KB) physical pages.
const int kPageSizeBits = 19;
#else
+// Arm64 supports up to 64k OS pages on Linux, however 4k pages are more common
+// so we keep the V8 page size at 256k. Nonetheless, we need to make sure we
+// don't decrease it further in the future due to reserving 3 OS pages for every
+// executable V8 page.
const int kPageSizeBits = 18;
#endif
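For illustration only, not part of this commit's diff: the arithmetic behind the new comment above. Reading it as "three 64 KiB OS pages must still fit inside one executable V8 page" (an assumption drawn from the comment's wording), kPageSizeBits = 18 gives 256 KiB, the smallest power of two that works.

#include <cstddef>

constexpr size_t kOsPageSize = 64 * 1024;        // large Arm64 OS pages
constexpr size_t kV8PageSize = size_t{1} << 18;  // kPageSizeBits = 18
static_assert(kV8PageSize == 256 * 1024, "2^18 bytes is 256 KiB");
static_assert(3 * kOsPageSize <= kV8PageSize, "3 OS pages fit in a V8 page");
static_assert(3 * kOsPageSize > (size_t{1} << 17), "128 KiB pages would not");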
diff --git a/deps/v8/src/base/debug/stack_trace_posix.cc b/deps/v8/src/base/debug/stack_trace_posix.cc
index ed602af547..270f1ca4e0 100644
--- a/deps/v8/src/base/debug/stack_trace_posix.cc
+++ b/deps/v8/src/base/debug/stack_trace_posix.cc
@@ -267,27 +267,28 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
class PrintBacktraceOutputHandler : public BacktraceOutputHandler {
public:
PrintBacktraceOutputHandler() = default;
+ PrintBacktraceOutputHandler(const PrintBacktraceOutputHandler&) = delete;
+ PrintBacktraceOutputHandler& operator=(const PrintBacktraceOutputHandler&) =
+ delete;
void HandleOutput(const char* output) override {
// NOTE: This code MUST be async-signal safe (it's used by in-process
// stack dumping signal handler). NO malloc or stdio is allowed here.
PrintToStderr(output);
}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(PrintBacktraceOutputHandler);
};
class StreamBacktraceOutputHandler : public BacktraceOutputHandler {
public:
explicit StreamBacktraceOutputHandler(std::ostream* os) : os_(os) {}
+ StreamBacktraceOutputHandler(const StreamBacktraceOutputHandler&) = delete;
+ StreamBacktraceOutputHandler& operator=(const StreamBacktraceOutputHandler&) =
+ delete;
void HandleOutput(const char* output) override { (*os_) << output; }
private:
std::ostream* os_;
-
- DISALLOW_COPY_AND_ASSIGN(StreamBacktraceOutputHandler);
};
void WarmUpBacktrace() {
diff --git a/deps/v8/src/base/hashmap-entry.h b/deps/v8/src/base/hashmap-entry.h
index 629e734088..2f984f3c2a 100644
--- a/deps/v8/src/base/hashmap-entry.h
+++ b/deps/v8/src/base/hashmap-entry.h
@@ -6,15 +6,25 @@
#define V8_BASE_HASHMAP_ENTRY_H_
#include <cstdint>
+#include <type_traits>
+
+#include "src/base/memory.h"
namespace v8 {
namespace base {
+// Marker type for hashmaps without a value (i.e. hashsets). These won't
+// allocate space for the value in the entry.
+struct NoHashMapValue {};
+
// HashMap entries are (key, value, hash) triplets, with a boolean indicating if
// they are an empty entry. Some clients may not need to use the value slot
-// (e.g. implementers of sets, where the key is the value).
+// (e.g. implementers of sets, where the key is the value), in which case they
+// should use NoHashMapValue.
template <typename Key, typename Value>
struct TemplateHashMapEntry {
+ STATIC_ASSERT((!std::is_same<Value, NoHashMapValue>::value));
+
Key key;
Value value;
uint32_t hash; // The full hash value for key
@@ -33,6 +43,8 @@ struct TemplateHashMapEntry {
// Specialization for pointer-valued keys
template <typename Key, typename Value>
struct TemplateHashMapEntry<Key*, Value> {
+ STATIC_ASSERT((!std::is_same<Value, NoHashMapValue>::value));
+
Key* key;
Value value;
uint32_t hash; // The full hash value for key
@@ -45,8 +57,42 @@ struct TemplateHashMapEntry<Key*, Value> {
void clear() { key = nullptr; }
};
-// TODO(leszeks): There could be a specialisation for void values (e.g. for
-// sets), which omits the value field
+// Specialization for no value.
+template <typename Key>
+struct TemplateHashMapEntry<Key, NoHashMapValue> {
+ union {
+ Key key;
+ NoHashMapValue value; // Value in union with key to not take up space.
+ };
+ uint32_t hash; // The full hash value for key
+
+ TemplateHashMapEntry(Key key, NoHashMapValue value, uint32_t hash)
+ : key(key), hash(hash), exists_(true) {}
+
+ bool exists() const { return exists_; }
+
+ void clear() { exists_ = false; }
+
+ private:
+ bool exists_;
+};
+
+// Specialization for pointer-valued keys and no value.
+template <typename Key>
+struct TemplateHashMapEntry<Key*, NoHashMapValue> {
+ union {
+ Key* key;
+ NoHashMapValue value; // Value in union with key to not take up space.
+ };
+ uint32_t hash; // The full hash value for key
+
+ TemplateHashMapEntry(Key* key, NoHashMapValue value, uint32_t hash)
+ : key(key), hash(hash) {}
+
+ bool exists() const { return key != nullptr; }
+
+ void clear() { key = nullptr; }
+};
} // namespace base
} // namespace v8
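To illustrate why the NoHashMapValue specializations exist, here is a minimal sketch of the space saving from placing the empty value in a union with the key. The NoValue/MapEntry/SetEntry names are hypothetical stand-ins for the real entry templates, and the size comparison assumes a typical 32- or 64-bit ABI:

#include <cstdint>

struct NoValue {};  // stand-in for v8::base::NoHashMapValue

// Shape of the general (key, value, hash) entry, for comparison.
template <typename Key, typename Value>
struct MapEntry {
  Key* key;
  Value value;
  uint32_t hash;
};

// Shape of the set-style entry: the empty value shares storage with the key.
template <typename Key>
struct SetEntry {
  union {
    Key* key;
    NoValue value;  // occupies no extra space
  };
  uint32_t hash;
  bool exists() const { return key != nullptr; }
  void clear() { key = nullptr; }
};

static_assert(sizeof(SetEntry<int>) < sizeof(MapEntry<int, void*>),
              "overlaying the value in a union saves one pointer per entry");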
diff --git a/deps/v8/src/base/hashmap.h b/deps/v8/src/base/hashmap.h
index 2b40b329b8..c0a7f21bf5 100644
--- a/deps/v8/src/base/hashmap.h
+++ b/deps/v8/src/base/hashmap.h
@@ -46,6 +46,9 @@ class TemplateHashMapImpl {
MatchFun match = MatchFun(),
AllocationPolicy allocator = AllocationPolicy());
+ TemplateHashMapImpl(const TemplateHashMapImpl&) = delete;
+ TemplateHashMapImpl& operator=(const TemplateHashMapImpl&) = delete;
+
// Clones the given hashmap and creates a copy with the same entries.
explicit TemplateHashMapImpl(const TemplateHashMapImpl* original,
AllocationPolicy allocator = AllocationPolicy());
@@ -72,6 +75,20 @@ class TemplateHashMapImpl {
template <typename Func>
Entry* LookupOrInsert(const Key& key, uint32_t hash, const Func& value_func);
+ // Heterogeneous version of LookupOrInsert, which allows a
+ // different lookup key type than the hashmap's key type.
+ // The requirement is that MatchFun has an overload:
+ //
+ // operator()(const LookupKey& lookup_key, const Key& entry_key)
+ //
+ // If an entry with matching key is found, returns that entry.
+ // If no matching entry is found, a new entry is inserted with
+ // a key created by key_func, key hash, and value created by
+ // value_func.
+ template <typename LookupKey, typename KeyFunc, typename ValueFunc>
+ Entry* LookupOrInsert(const LookupKey& lookup_key, uint32_t hash,
+ const KeyFunc& key_func, const ValueFunc& value_func);
+
Entry* InsertNew(const Key& key, uint32_t hash);
// Removes the entry with matching key.
@@ -115,7 +132,8 @@ class TemplateHashMapImpl {
private:
Entry* map_end() const { return impl_.map_ + impl_.capacity_; }
- Entry* Probe(const Key& key, uint32_t hash) const;
+ template <typename LookupKey>
+ Entry* Probe(const LookupKey& key, uint32_t hash) const;
Entry* FillEmptyEntry(Entry* entry, const Key& key, const Value& value,
uint32_t hash);
void Resize();
@@ -160,8 +178,6 @@ class TemplateHashMapImpl {
uint32_t capacity_ = 0;
uint32_t occupancy_ = 0;
} impl_;
-
- DISALLOW_COPY_AND_ASSIGN(TemplateHashMapImpl);
};
template <typename Key, typename Value, typename MatchFun,
class AllocationPolicy>
@@ -214,13 +230,24 @@ template <typename Func>
typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::LookupOrInsert(
const Key& key, uint32_t hash, const Func& value_func) {
+ return LookupOrInsert(
+ key, hash, [&key]() { return key; }, value_func);
+}
+
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+template <typename LookupKey, typename KeyFunc, typename ValueFunc>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::LookupOrInsert(
+ const LookupKey& lookup_key, uint32_t hash, const KeyFunc& key_func,
+ const ValueFunc& value_func) {
// Find a matching entry.
- Entry* entry = Probe(key, hash);
+ Entry* entry = Probe(lookup_key, hash);
if (entry->exists()) {
return entry;
}
- return FillEmptyEntry(entry, key, value_func(), hash);
+ return FillEmptyEntry(entry, key_func(), value_func(), hash);
}
template <typename Key, typename Value, typename MatchFun,
@@ -328,9 +355,10 @@ TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Next(
template <typename Key, typename Value, typename MatchFun,
class AllocationPolicy>
+template <typename LookupKey>
typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Probe(
- const Key& key, uint32_t hash) const {
+ const LookupKey& key, uint32_t hash) const {
DCHECK(base::bits::IsPowerOfTwo(capacity()));
size_t i = hash & (capacity() - 1);
DCHECK(i < capacity());
@@ -442,8 +470,10 @@ class CustomMatcherTemplateHashMapImpl
AllocationPolicy allocator = AllocationPolicy())
: Base(original, allocator) {}
- private:
- DISALLOW_COPY_AND_ASSIGN(CustomMatcherTemplateHashMapImpl);
+ CustomMatcherTemplateHashMapImpl(const CustomMatcherTemplateHashMapImpl&) =
+ delete;
+ CustomMatcherTemplateHashMapImpl& operator=(
+ const CustomMatcherTemplateHashMapImpl&) = delete;
};
using CustomMatcherHashMap =
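A minimal sketch of how the new heterogeneous LookupOrInsert overload might be used, assuming a hashmap keyed by std::string* with int values. CStringMatcher and ComputeHash are hypothetical names, and the call site is shown in comments because the full map type is elided:

#include <cstring>
#include <string>

// Hypothetical MatchFun providing both the homogeneous overload and the
// heterogeneous (const char*, std::string*) overload required by the new
// LookupOrInsert(lookup_key, hash, key_func, value_func).
struct CStringMatcher {
  bool operator()(const std::string* a, const std::string* b) const {
    return *a == *b;
  }
  bool operator()(const char* lookup_key, const std::string* entry_key) const {
    return std::strcmp(lookup_key, entry_key->c_str()) == 0;
  }
};

// Sketch of a call site (map type, hashing, and ownership elided):
//   auto* entry = map.LookupOrInsert(
//       "needle", ComputeHash("needle"),
//       [] { return new std::string("needle"); },  // key_func: materialize key
//       [] { return 0; });                         // value_func: initial value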
diff --git a/deps/v8/src/base/lazy-instance.h b/deps/v8/src/base/lazy-instance.h
index 3ea5fc9575..75e5b06006 100644
--- a/deps/v8/src/base/lazy-instance.h
+++ b/deps/v8/src/base/lazy-instance.h
@@ -235,12 +235,13 @@ class LeakyObject {
new (&storage_) T(std::forward<Args>(args)...);
}
+ LeakyObject(const LeakyObject&) = delete;
+ LeakyObject& operator=(const LeakyObject&) = delete;
+
T* get() { return reinterpret_cast<T*>(&storage_); }
private:
typename std::aligned_storage<sizeof(T), alignof(T)>::type storage_;
-
- DISALLOW_COPY_AND_ASSIGN(LeakyObject);
};
// Define a function which returns a pointer to a lazily initialized and never
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 37cab78f08..8b39da2451 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -109,11 +109,15 @@ V8_INLINE Dest bit_cast(Source const& source) {
}
// Explicitly declare the assignment operator as deleted.
+// Note: This macro is deprecated and will be removed soon. Please explicitly
+// delete the assignment operator instead.
#define DISALLOW_ASSIGN(TypeName) TypeName& operator=(const TypeName&) = delete
// Explicitly declare the copy constructor and assignment operator as deleted.
// This also deletes the implicit move constructor and implicit move assignment
// operator, but still allows to manually define them.
+// Note: This macro is deprecated and will be removed soon. Please explicitly
+// delete the copy constructor and assignment operator instead.
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&) = delete; \
DISALLOW_ASSIGN(TypeName)
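The replacement pattern that the deprecation notes point to is the one applied throughout this patch: explicitly deleted copy operations in the public section. A minimal before/after sketch with a hypothetical Worker class:

// Before (deprecated macro):
//   class Worker {
//    private:
//     DISALLOW_COPY_AND_ASSIGN(Worker);
//   };
//
// After, as migrated throughout this patch:
class Worker {
 public:
  Worker() = default;
  Worker(const Worker&) = delete;
  Worker& operator=(const Worker&) = delete;
};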
diff --git a/deps/v8/src/base/platform/DIR_METADATA b/deps/v8/src/base/platform/DIR_METADATA
new file mode 100644
index 0000000000..2f8dbbcf45
--- /dev/null
+++ b/deps/v8/src/base/platform/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/base/platform/OWNERS b/deps/v8/src/base/platform/OWNERS
index bf5455c9af..782eb7c684 100644
--- a/deps/v8/src/base/platform/OWNERS
+++ b/deps/v8/src/base/platform/OWNERS
@@ -3,5 +3,3 @@ mlippautz@chromium.org
ulan@chromium.org
per-file platform-fuchsia.cc=wez@chromium.org
-
-# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/base/platform/condition-variable.h b/deps/v8/src/base/platform/condition-variable.h
index 8b5c7cf569..79e653a32a 100644
--- a/deps/v8/src/base/platform/condition-variable.h
+++ b/deps/v8/src/base/platform/condition-variable.h
@@ -36,6 +36,8 @@ class TimeDelta;
class V8_BASE_EXPORT ConditionVariable final {
public:
ConditionVariable();
+ ConditionVariable(const ConditionVariable&) = delete;
+ ConditionVariable& operator=(const ConditionVariable&) = delete;
~ConditionVariable();
// If any threads are waiting on this condition variable, calling
@@ -81,8 +83,6 @@ class V8_BASE_EXPORT ConditionVariable final {
private:
NativeHandle native_handle_;
-
- DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
// POD ConditionVariable initialized lazily (i.e. the first time Pointer() is
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index 7a19b2f4aa..1b950c61ad 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -43,6 +43,8 @@ namespace base {
class V8_BASE_EXPORT Mutex final {
public:
Mutex();
+ Mutex(const Mutex&) = delete;
+ Mutex& operator=(const Mutex&) = delete;
~Mutex();
// Locks the given mutex. If the mutex is currently unlocked, it becomes
@@ -99,8 +101,6 @@ class V8_BASE_EXPORT Mutex final {
}
friend class ConditionVariable;
-
- DISALLOW_COPY_AND_ASSIGN(Mutex);
};
// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
@@ -140,6 +140,8 @@ using LazyMutex = LazyStaticInstance<Mutex, DefaultConstructTrait<Mutex>,
class V8_BASE_EXPORT RecursiveMutex final {
public:
RecursiveMutex();
+ RecursiveMutex(const RecursiveMutex&) = delete;
+ RecursiveMutex& operator=(const RecursiveMutex&) = delete;
~RecursiveMutex();
// Locks the mutex. If another thread has already locked the mutex, a call to
@@ -175,8 +177,6 @@ class V8_BASE_EXPORT RecursiveMutex final {
#ifdef DEBUG
int level_;
#endif
-
- DISALLOW_COPY_AND_ASSIGN(RecursiveMutex);
};
@@ -213,6 +213,8 @@ using LazyRecursiveMutex =
class V8_BASE_EXPORT SharedMutex final {
public:
SharedMutex();
+ SharedMutex(const SharedMutex&) = delete;
+ SharedMutex& operator=(const SharedMutex&) = delete;
~SharedMutex();
// Acquires shared ownership of the {SharedMutex}. If another thread is
@@ -262,8 +264,6 @@ class V8_BASE_EXPORT SharedMutex final {
#endif
NativeHandle native_handle_;
-
- DISALLOW_COPY_AND_ASSIGN(SharedMutex);
};
// -----------------------------------------------------------------------------
@@ -286,6 +286,8 @@ class LockGuard final {
explicit LockGuard(Mutex* mutex) : mutex_(mutex) {
if (has_mutex()) mutex_->Lock();
}
+ LockGuard(const LockGuard&) = delete;
+ LockGuard& operator=(const LockGuard&) = delete;
~LockGuard() {
if (has_mutex()) mutex_->Unlock();
}
@@ -298,8 +300,6 @@ class LockGuard final {
mutex_ != nullptr);
return Behavior == NullBehavior::kRequireNotNull || mutex_ != nullptr;
}
-
- DISALLOW_COPY_AND_ASSIGN(LockGuard);
};
using MutexGuard = LockGuard<Mutex>;
@@ -319,6 +319,8 @@ class SharedMutexGuard final {
mutex_->LockExclusive();
}
}
+ SharedMutexGuard(const SharedMutexGuard&) = delete;
+ SharedMutexGuard& operator=(const SharedMutexGuard&) = delete;
~SharedMutexGuard() {
if (!has_mutex()) return;
if (kIsShared) {
@@ -336,8 +338,6 @@ class SharedMutexGuard final {
mutex_ != nullptr);
return Behavior == NullBehavior::kRequireNotNull || mutex_ != nullptr;
}
-
- DISALLOW_COPY_AND_ASSIGN(SharedMutexGuard);
};
} // namespace base
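For context, a minimal usage sketch of the guard types declared above; the counter, mutex, and function names are hypothetical. Locking happens in the constructor and unlocking in the destructor, and copying a guard now fails to compile because its copy operations are deleted:

#include "src/base/platform/mutex.h"

namespace {
// Sketch only; static-initialization-order concerns (e.g. LazyMutex) ignored.
v8::base::Mutex g_counter_mutex;
int g_counter = 0;

void Increment() {
  // Locks on construction, unlocks when the guard leaves scope.
  v8::base::MutexGuard guard(&g_counter_mutex);
  ++g_counter;
}
}  // namespace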
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index e1ccda2ab0..6b6a870370 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -130,7 +130,7 @@ void OS::SignalCodeMovingGC() {}
void OS::AdjustSchedulingParams() {}
// static
-void* Stack::GetStackStart() {
+Stack::StackSlot Stack::GetStackStart() {
// pthread_getthrds_np creates 3 values:
// __pi_stackaddr, __pi_stacksize, __pi_stackend
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index ed16ad096d..edc793c662 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -98,7 +98,7 @@ void OS::SignalCodeMovingGC() {}
void OS::AdjustSchedulingParams() {}
// static
-void* Stack::GetStackStart() {
+Stack::StackSlot Stack::GetStackStart() {
pthread_attr_t attr;
int error;
pthread_attr_init(&attr);
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 35a508a140..381b59a904 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -4,6 +4,7 @@
#include <zircon/process.h>
#include <zircon/syscalls.h>
+#include <zircon/threads.h>
#include "src/base/macros.h"
#include "src/base/platform/platform-posix-time.h"
@@ -151,17 +152,18 @@ void OS::SignalCodeMovingGC() {
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
const auto kNanosPerMicrosecond = 1000ULL;
const auto kMicrosPerSecond = 1000000ULL;
- zx_time_t nanos_since_thread_started;
- zx_status_t status =
- zx_clock_get(ZX_CLOCK_THREAD, &nanos_since_thread_started);
+
+ zx_info_thread_stats_t info = {};
+ zx_status_t status = zx_object_get_info(thrd_get_zx_handle(thrd_current()),
+ ZX_INFO_THREAD_STATS, &info,
+ sizeof(info), nullptr, nullptr);
if (status != ZX_OK) {
return -1;
}
// First convert to microseconds, rounding up.
const uint64_t micros_since_thread_started =
- (nanos_since_thread_started + kNanosPerMicrosecond - 1ULL) /
- kNanosPerMicrosecond;
+ (info.total_runtime + kNanosPerMicrosecond - 1ULL) / kNanosPerMicrosecond;
*secs = static_cast<uint32_t>(micros_since_thread_started / kMicrosPerSecond);
*usecs =
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index bee6b30f7c..3f1638ec0d 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -94,7 +94,7 @@ void OS::AdjustSchedulingParams() {
}
// static
-void* Stack::GetStackStart() {
+Stack::StackSlot Stack::GetStackStart() {
return pthread_get_stackaddr_np(pthread_self());
}
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index d5624cb8ac..ab0d7839a4 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -415,16 +415,6 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
int prot = GetProtectionFromMemoryPermission(access);
int ret = mprotect(address, size, prot);
-
- // MacOS 11.2 on Apple Silicon refuses to switch permissions from
- // rwx to none. Just use madvise instead.
-#if defined(V8_OS_MACOSX)
- if (ret != 0 && access == OS::MemoryPermission::kNoAccess) {
- ret = madvise(address, size, MADV_FREE_REUSABLE);
- return ret == 0;
- }
-#endif
-
if (ret == 0 && access == OS::MemoryPermission::kNoAccess) {
// This is advisory; ignore errors and continue execution.
USE(DiscardSystemPages(address, size));
@@ -1013,7 +1003,7 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
!defined(V8_OS_SOLARIS)
// static
-void* Stack::GetStackStart() {
+Stack::StackSlot Stack::GetStackStart() {
pthread_attr_t attr;
int error = pthread_getattr_np(pthread_self(), &attr);
if (!error) {
@@ -1039,7 +1029,9 @@ void* Stack::GetStackStart() {
// !defined(_AIX) && !defined(V8_OS_SOLARIS)
// static
-void* Stack::GetCurrentStackPosition() { return __builtin_frame_address(0); }
+Stack::StackSlot Stack::GetCurrentStackPosition() {
+ return __builtin_frame_address(0);
+}
#undef LOG_TAG
#undef MAP_ANONYMOUS
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index e7b1e51936..cee24e9876 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -1395,7 +1395,7 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
void OS::AdjustSchedulingParams() {}
// static
-void* Stack::GetStackStart() {
+Stack::StackSlot Stack::GetStackStart() {
#if defined(V8_TARGET_ARCH_X64)
return reinterpret_cast<void*>(
reinterpret_cast<NT_TIB64*>(NtCurrentTeb())->StackBase);
@@ -1414,7 +1414,7 @@ void* Stack::GetStackStart() {
}
// static
-void* Stack::GetCurrentStackPosition() {
+Stack::StackSlot Stack::GetCurrentStackPosition() {
#if V8_CC_MSVC
return _AddressOfReturnAddress();
#else
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 9c52f21804..042e4428cd 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -22,6 +22,7 @@
#define V8_BASE_PLATFORM_PLATFORM_H_
#include <cstdarg>
+#include <cstdint>
#include <string>
#include <vector>
@@ -353,6 +354,8 @@ class V8_BASE_EXPORT Thread {
// Create new thread.
explicit Thread(const Options& options);
+ Thread(const Thread&) = delete;
+ Thread& operator=(const Thread&) = delete;
virtual ~Thread();
// Start new thread by calling the Run() method on the new thread.
@@ -426,37 +429,48 @@ class V8_BASE_EXPORT Thread {
char name_[kMaxThreadNameLength];
int stack_size_;
Semaphore* start_semaphore_;
-
- DISALLOW_COPY_AND_ASSIGN(Thread);
};
// TODO(v8:10354): Make use of the stack utilities here in V8.
class V8_BASE_EXPORT Stack {
public:
+ // Convenience wrapper to use stack slots as unsigned values or void*
+ // pointers.
+ struct StackSlot {
+ // NOLINTNEXTLINE
+ StackSlot(void* value) : value(reinterpret_cast<uintptr_t>(value)) {}
+ StackSlot(uintptr_t value) : value(value) {} // NOLINT
+
+ // NOLINTNEXTLINE
+ operator void*() const { return reinterpret_cast<void*>(value); }
+ operator uintptr_t() const { return value; } // NOLINT
+
+ uintptr_t value;
+ };
+
// Gets the start of the stack of the current thread.
- static void* GetStackStart();
+ static StackSlot GetStackStart();
// Returns the current stack top. Works correctly with ASAN and SafeStack.
// GetCurrentStackPosition() should not be inlined, because it works on stack
// frames: if it were inlined into a function with a huge stack frame, it would
// return an address significantly above the actual current stack position.
- static V8_NOINLINE void* GetCurrentStackPosition();
+ static V8_NOINLINE StackSlot GetCurrentStackPosition();
- // Translates an ASAN-based slot to a real stack slot if necessary.
- static void* GetStackSlot(void* slot) {
+ // Returns the real stack frame if slot is part of a fake frame, and slot
+ // otherwise.
+ static StackSlot GetRealStackAddressForSlot(StackSlot slot) {
#ifdef V8_USE_ADDRESS_SANITIZER
- void* fake_stack = __asan_get_current_fake_stack();
- if (fake_stack) {
- void* fake_frame_start;
- void* real_frame = __asan_addr_is_in_fake_stack(
- fake_stack, slot, &fake_frame_start, nullptr);
- if (real_frame) {
- return reinterpret_cast<void*>(
- reinterpret_cast<uintptr_t>(real_frame) +
- (reinterpret_cast<uintptr_t>(slot) -
- reinterpret_cast<uintptr_t>(fake_frame_start)));
- }
- }
+ // ASAN fetches the real stack deeper in the __asan_addr_is_in_fake_stack()
+ // call (precisely, deeper in __asan_stack_malloc_()), which results in a
+ // real frame that could be outside of stack bounds. Adjust for this
+ // impreciseness here.
+ constexpr size_t kAsanRealFrameOffsetBytes = 32;
+ void* real_frame = __asan_addr_is_in_fake_stack(
+ __asan_get_current_fake_stack(), slot, nullptr, nullptr);
+ return real_frame
+ ? (static_cast<char*>(real_frame) + kAsanRealFrameOffsetBytes)
+ : slot;
#endif // V8_USE_ADDRESS_SANITIZER
return slot;
}
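A minimal sketch of how the new StackSlot wrapper can be consumed through its implicit conversions to uintptr_t; LooksLikeStackAddress is a hypothetical helper, and the range check assumes the usual downward-growing stack (so the stack start is the highest address):

#include <cstdint>
#include "src/base/platform/platform.h"

// Sketch only: checks whether `slot` lies within the current thread's stack.
bool LooksLikeStackAddress(void* slot) {
  // Both conversions use StackSlot's implicit operator uintptr_t.
  uintptr_t start = v8::base::Stack::GetStackStart();
  uintptr_t current = v8::base::Stack::GetCurrentStackPosition();
  uintptr_t value = reinterpret_cast<uintptr_t>(slot);
  return value >= current && value <= start;
}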
diff --git a/deps/v8/src/base/platform/semaphore.h b/deps/v8/src/base/platform/semaphore.h
index 0c0b877da2..83a7a3392f 100644
--- a/deps/v8/src/base/platform/semaphore.h
+++ b/deps/v8/src/base/platform/semaphore.h
@@ -39,6 +39,8 @@ class TimeDelta;
class V8_BASE_EXPORT Semaphore final {
public:
explicit Semaphore(int count);
+ Semaphore(const Semaphore&) = delete;
+ Semaphore& operator=(const Semaphore&) = delete;
~Semaphore();
// Increments the semaphore counter.
@@ -72,8 +74,6 @@ class V8_BASE_EXPORT Semaphore final {
private:
NativeHandle native_handle_;
-
- DISALLOW_COPY_AND_ASSIGN(Semaphore);
};
diff --git a/deps/v8/src/base/platform/wrappers.h b/deps/v8/src/base/platform/wrappers.h
new file mode 100644
index 0000000000..521b06ebe1
--- /dev/null
+++ b/deps/v8/src/base/platform/wrappers.h
@@ -0,0 +1,31 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_WRAPPERS_H_
+#define V8_BASE_PLATFORM_WRAPPERS_H_
+
+#include <stddef.h>
+#include <stdio.h>
+
+namespace v8 {
+namespace base {
+
+void* Malloc(size_t size);
+
+void* Realloc(void* memory, size_t size);
+
+void Free(void* memory);
+
+void* Calloc(size_t count, size_t size);
+
+void* Memcpy(void* dest, const void* source, size_t count);
+
+FILE* Fopen(const char* filename, const char* mode);
+
+int Fclose(FILE* stream);
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_PLATFORM_WRAPPERS_H_
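A minimal caller sketch for these wrappers, assuming only the declarations in this header; CopyAndDrop is a hypothetical helper showing allocation, copy, and release routed through the platform layer rather than libc directly:

#include "src/base/platform/wrappers.h"

// Sketch: allocate, copy, and release a buffer via the platform wrappers.
void CopyAndDrop(const void* src, size_t length) {
  void* buffer = v8::base::Malloc(length);
  if (buffer == nullptr) return;  // allocation can fail, e.g. on Starboard
  v8::base::Memcpy(buffer, src, length);
  // ... use buffer ...
  v8::base::Free(buffer);
}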
diff --git a/deps/v8/src/base/platform/wrappers_starboard.cc b/deps/v8/src/base/platform/wrappers_starboard.cc
new file mode 100644
index 0000000000..199e753409
--- /dev/null
+++ b/deps/v8/src/base/platform/wrappers_starboard.cc
@@ -0,0 +1,31 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "starboard/memory.h"
+
+#include "src/base/platform/wrappers.h"
+
+namespace v8 {
+namespace base {
+
+void* Malloc(size_t size) { return SbMemoryAlloc(size); }
+
+void* Realloc(void* memory, size_t size) {
+ return SbMemoryReallocate(memory, size);
+}
+
+void Free(void* memory) { return SbMemoryDeallocate(memory); }
+
+void* Calloc(size_t count, size_t size) { return SbMemoryCalloc(count, size); }
+
+void* Memcpy(void* dest, const void* source, size_t count) {
+ return SbMemoryCopy(dest, source, count);
+}
+
+FILE* Fopen(const char* filename, const char* mode) { return NULL; }
+
+int Fclose(FILE* stream) { return -1; }
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/wrappers_std.cc b/deps/v8/src/base/platform/wrappers_std.cc
new file mode 100644
index 0000000000..6b38b18e37
--- /dev/null
+++ b/deps/v8/src/base/platform/wrappers_std.cc
@@ -0,0 +1,34 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/base/platform/wrappers.h"
+
+namespace v8 {
+namespace base {
+
+void* Malloc(size_t size) { return malloc(size); }
+
+void* Realloc(void* memory, size_t size) { return realloc(memory, size); }
+
+void Free(void* memory) { return free(memory); }
+
+void* Calloc(size_t count, size_t size) { return calloc(count, size); }
+
+void* Memcpy(void* dest, const void* source, size_t count) {
+ return memcpy(dest, source, count);
+}
+
+FILE* Fopen(const char* filename, const char* mode) {
+ return fopen(filename, mode);
+}
+
+int Fclose(FILE* stream) { return fclose(stream); }
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/region-allocator.h b/deps/v8/src/base/region-allocator.h
index 887f123b10..adc4bd10b6 100644
--- a/deps/v8/src/base/region-allocator.h
+++ b/deps/v8/src/base/region-allocator.h
@@ -39,6 +39,8 @@ class V8_BASE_EXPORT RegionAllocator final {
};
RegionAllocator(Address address, size_t size, size_t page_size);
+ RegionAllocator(const RegionAllocator&) = delete;
+ RegionAllocator& operator=(const RegionAllocator&) = delete;
~RegionAllocator();
// Allocates region of |size| (must be |page_size|-aligned). Returns
@@ -176,8 +178,6 @@ class V8_BASE_EXPORT RegionAllocator final {
FRIEND_TEST(RegionAllocatorTest, Contains);
FRIEND_TEST(RegionAllocatorTest, FindRegion);
FRIEND_TEST(RegionAllocatorTest, Fragmentation);
-
- DISALLOW_COPY_AND_ASSIGN(RegionAllocator);
};
} // namespace base
diff --git a/deps/v8/src/base/ring-buffer.h b/deps/v8/src/base/ring-buffer.h
index b347977640..8357987083 100644
--- a/deps/v8/src/base/ring-buffer.h
+++ b/deps/v8/src/base/ring-buffer.h
@@ -14,7 +14,11 @@ template <typename T>
class RingBuffer {
public:
RingBuffer() { Reset(); }
+ RingBuffer(const RingBuffer&) = delete;
+ RingBuffer& operator=(const RingBuffer&) = delete;
+
static const int kSize = 10;
+
void Push(const T& value) {
if (count_ == kSize) {
elements_[start_++] = value;
@@ -45,7 +49,6 @@ class RingBuffer {
T elements_[kSize];
int start_;
int count_;
- DISALLOW_COPY_AND_ASSIGN(RingBuffer);
};
} // namespace base
diff --git a/deps/v8/src/base/safe_conversions.h b/deps/v8/src/base/safe_conversions.h
index f63f1ad99e..38aa7b9aaa 100644
--- a/deps/v8/src/base/safe_conversions.h
+++ b/deps/v8/src/base/safe_conversions.h
@@ -4,59 +4,383 @@
// Slightly adapted for inclusion in V8.
// Copyright 2014 the V8 project authors. All rights reserved.
+// List of adaptations:
+// - include guard names
+// - wrap in v8 namespace
+// - formatting (git cl format)
+// - include paths
#ifndef V8_BASE_SAFE_CONVERSIONS_H_
#define V8_BASE_SAFE_CONVERSIONS_H_
+#include <stddef.h>
+
+#include <cmath>
#include <limits>
+#include <type_traits>
#include "src/base/safe_conversions_impl.h"
+#if defined(__ARMEL__) && !defined(__native_client__)
+#include "src/base/safe_conversions_arm_impl.h"
+#define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (1)
+#else
+#define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (0)
+#endif
+
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+#include <ostream>
+#endif
+
namespace v8 {
namespace base {
+namespace internal {
+
+#if !BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+template <typename Dst, typename Src>
+struct SaturateFastAsmOp {
+ static constexpr bool is_supported = false;
+ static constexpr Dst Do(Src) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<Dst>();
+ }
+};
+#endif // BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+#undef BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+
+// The following special-case a few specific integer conversions where we can
+// eke out better performance than range checking.
+template <typename Dst, typename Src, typename Enable = void>
+struct IsValueInRangeFastOp {
+ static constexpr bool is_supported = false;
+ static constexpr bool Do(Src value) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+// Signed to signed range comparison.
+template <typename Dst, typename Src>
+struct IsValueInRangeFastOp<
+ Dst, Src,
+ typename std::enable_if<
+ std::is_integral<Dst>::value && std::is_integral<Src>::value &&
+ std::is_signed<Dst>::value && std::is_signed<Src>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
+ static constexpr bool is_supported = true;
+
+ static constexpr bool Do(Src value) {
+ // Just downcast to the smaller type, sign extend it back to the original
+ // type, and then see if it matches the original value.
+ return value == static_cast<Dst>(value);
+ }
+};
+
+// Signed to unsigned range comparison.
+template <typename Dst, typename Src>
+struct IsValueInRangeFastOp<
+ Dst, Src,
+ typename std::enable_if<
+ std::is_integral<Dst>::value && std::is_integral<Src>::value &&
+ !std::is_signed<Dst>::value && std::is_signed<Src>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
+ static constexpr bool is_supported = true;
+
+ static constexpr bool Do(Src value) {
+ // We cast a signed as unsigned to overflow negative values to the top,
+ // then compare against whichever maximum is smaller, as our upper bound.
+ return as_unsigned(value) <= as_unsigned(CommonMax<Src, Dst>());
+ }
+};
// Convenience function that returns true if the supplied value is in range
// for the destination type.
template <typename Dst, typename Src>
-inline bool IsValueInRangeForNumericType(Src value) {
- return internal::DstRangeRelationToSrcRange<Dst>(value) ==
- internal::RANGE_VALID;
+constexpr bool IsValueInRangeForNumericType(Src value) {
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return internal::IsValueInRangeFastOp<Dst, SrcType>::is_supported
+ ? internal::IsValueInRangeFastOp<Dst, SrcType>::Do(
+ static_cast<SrcType>(value))
+ : internal::DstRangeRelationToSrcRange<Dst>(
+ static_cast<SrcType>(value))
+ .IsValid();
}
// checked_cast<> is analogous to static_cast<> for numeric types,
// except that it CHECKs that the specified numeric conversion will not
// overflow or underflow. NaN source will always trigger a CHECK.
-template <typename Dst, typename Src>
-inline Dst checked_cast(Src value) {
- CHECK(IsValueInRangeForNumericType<Dst>(value));
- return static_cast<Dst>(value);
+template <typename Dst, class CheckHandler = internal::CheckOnFailure,
+ typename Src>
+constexpr Dst checked_cast(Src value) {
+ // This throws a compile-time error on evaluating the constexpr if it can be
+ // determined at compile-time as failing, otherwise it will CHECK at runtime.
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return BASE_NUMERICS_LIKELY((IsValueInRangeForNumericType<Dst>(value)))
+ ? static_cast<Dst>(static_cast<SrcType>(value))
+ : CheckHandler::template HandleFailure<Dst>();
}
+// Default boundaries for integral/float: max/infinity, lowest/-infinity, 0/NaN.
+// You may provide your own limits (e.g. to saturated_cast) so long as you
+// implement all of the static constexpr member functions in the class below.
+template <typename T>
+struct SaturationDefaultLimits : public std::numeric_limits<T> {
+ static constexpr T NaN() {
+ return std::numeric_limits<T>::has_quiet_NaN
+ ? std::numeric_limits<T>::quiet_NaN()
+ : T();
+ }
+ using std::numeric_limits<T>::max;
+ static constexpr T Overflow() {
+ return std::numeric_limits<T>::has_infinity
+ ? std::numeric_limits<T>::infinity()
+ : std::numeric_limits<T>::max();
+ }
+ using std::numeric_limits<T>::lowest;
+ static constexpr T Underflow() {
+ return std::numeric_limits<T>::has_infinity
+ ? std::numeric_limits<T>::infinity() * -1
+ : std::numeric_limits<T>::lowest();
+ }
+};
+
+template <typename Dst, template <typename> class S, typename Src>
+constexpr Dst saturated_cast_impl(Src value, RangeCheck constraint) {
+ // For some reason clang generates much better code when the branch is
+ // structured exactly this way, rather than a sequence of checks.
+ return !constraint.IsOverflowFlagSet()
+ ? (!constraint.IsUnderflowFlagSet() ? static_cast<Dst>(value)
+ : S<Dst>::Underflow())
+ // Skip this check for integral Src, which cannot be NaN.
+ : (std::is_integral<Src>::value || !constraint.IsUnderflowFlagSet()
+ ? S<Dst>::Overflow()
+ : S<Dst>::NaN());
+}
+
+// We can reduce the number of conditions and get slightly better performance
+// for normal signed and unsigned integer ranges. And in the specific case of
+// Arm, we can use the optimized saturation instructions.
+template <typename Dst, typename Src, typename Enable = void>
+struct SaturateFastOp {
+ static constexpr bool is_supported = false;
+ static constexpr Dst Do(Src value) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<Dst>();
+ }
+};
+
+template <typename Dst, typename Src>
+struct SaturateFastOp<
+ Dst, Src,
+ typename std::enable_if<std::is_integral<Src>::value &&
+ std::is_integral<Dst>::value &&
+ SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
+ static constexpr bool is_supported = true;
+ static constexpr Dst Do(Src value) {
+ return SaturateFastAsmOp<Dst, Src>::Do(value);
+ }
+};
+
+template <typename Dst, typename Src>
+struct SaturateFastOp<
+ Dst, Src,
+ typename std::enable_if<std::is_integral<Src>::value &&
+ std::is_integral<Dst>::value &&
+ !SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
+ static constexpr bool is_supported = true;
+ static constexpr Dst Do(Src value) {
+ // The exact order of the following is structured to hit the correct
+ // optimization heuristics across compilers. Do not change without
+ // checking the emitted code.
+ const Dst saturated = CommonMaxOrMin<Dst, Src>(
+ IsMaxInRangeForNumericType<Dst, Src>() ||
+ (!IsMinInRangeForNumericType<Dst, Src>() && IsValueNegative(value)));
+ return BASE_NUMERICS_LIKELY(IsValueInRangeForNumericType<Dst>(value))
+ ? static_cast<Dst>(value)
+ : saturated;
+ }
+};
+
// saturated_cast<> is analogous to static_cast<> for numeric types, except
-// that the specified numeric conversion will saturate rather than overflow or
-// underflow. NaN assignment to an integral will trigger a CHECK condition.
+// that the specified numeric conversion will saturate by default rather than
+// overflow or underflow, and NaN assignment to an integral will return 0.
+// All boundary condition behaviors can be overridden with a custom handler.
+template <typename Dst,
+ template <typename> class SaturationHandler = SaturationDefaultLimits,
+ typename Src>
+constexpr Dst saturated_cast(Src value) {
+ using SrcType = typename UnderlyingType<Src>::type;
+ return !IsCompileTimeConstant(value) &&
+ SaturateFastOp<Dst, SrcType>::is_supported &&
+ std::is_same<SaturationHandler<Dst>,
+ SaturationDefaultLimits<Dst>>::value
+ ? SaturateFastOp<Dst, SrcType>::Do(static_cast<SrcType>(value))
+ : saturated_cast_impl<Dst, SaturationHandler, SrcType>(
+ static_cast<SrcType>(value),
+ DstRangeRelationToSrcRange<Dst, SaturationHandler, SrcType>(
+ static_cast<SrcType>(value)));
+}
+
+// strict_cast<> is analogous to static_cast<> for numeric types, except that
+// it will cause a compile failure if the destination type is not large enough
+// to contain any value in the source type. It performs no runtime checking.
template <typename Dst, typename Src>
-inline Dst saturated_cast(Src value) {
- // Optimization for floating point values, which already saturate.
- if (std::numeric_limits<Dst>::is_iec559)
- return static_cast<Dst>(value);
+constexpr Dst strict_cast(Src value) {
+ using SrcType = typename UnderlyingType<Src>::type;
+ static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+
+ // If you got here from a compiler error, it's because you tried to assign
+ // from a source type to a destination type that has insufficient range.
+ // The solution may be to change the destination type you're assigning to,
+ // and use one large enough to represent the source.
+ // Alternatively, you may be better served with the checked_cast<> or
+ // saturated_cast<> template functions for your particular use case.
+ static_assert(StaticDstRangeRelationToSrcRange<Dst, SrcType>::value ==
+ NUMERIC_RANGE_CONTAINED,
+ "The source type is out of range for the destination type. "
+ "Please see strict_cast<> comments for more information.");
+
+ return static_cast<Dst>(static_cast<SrcType>(value));
+}
+
+// Some wrappers to statically check that a type is in range.
+template <typename Dst, typename Src, class Enable = void>
+struct IsNumericRangeContained {
+ static constexpr bool value = false;
+};
+
+template <typename Dst, typename Src>
+struct IsNumericRangeContained<
+ Dst, Src,
+ typename std::enable_if<ArithmeticOrUnderlyingEnum<Dst>::value &&
+ ArithmeticOrUnderlyingEnum<Src>::value>::type> {
+ static constexpr bool value =
+ StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
+ NUMERIC_RANGE_CONTAINED;
+};
+
+// StrictNumeric implements compile time range checking between numeric types by
+// wrapping assignment operations in a strict_cast. This class is intended to be
+// used for function arguments and return types, to ensure the destination type
+// can always contain the source type. This is essentially the same as enforcing
+// -Wconversion in gcc and C4302 warnings on MSVC, but it can be applied
+// incrementally at API boundaries, making it easier to convert code so that it
+// compiles cleanly with truncation warnings enabled.
+// This template should introduce no runtime overhead, but it also provides no
+// runtime checking of any of the associated mathematical operations. Use
+// CheckedNumeric for runtime range checks of the actual value being assigned.
+template <typename T>
+class StrictNumeric {
+ public:
+ using type = T;
- switch (internal::DstRangeRelationToSrcRange<Dst>(value)) {
- case internal::RANGE_VALID:
- return static_cast<Dst>(value);
+ constexpr StrictNumeric() : value_(0) {}
- case internal::RANGE_UNDERFLOW:
- return std::numeric_limits<Dst>::min();
+ // Copy constructor.
+ template <typename Src>
+ constexpr StrictNumeric(const StrictNumeric<Src>& rhs)
+ : value_(strict_cast<T>(rhs.value_)) {}
- case internal::RANGE_OVERFLOW:
- return std::numeric_limits<Dst>::max();
+ // This is not an explicit constructor because we implicitly upgrade regular
+ // numerics to StrictNumerics to make them easier to use.
+ template <typename Src>
+ constexpr StrictNumeric(Src value) // NOLINT(runtime/explicit)
+ : value_(strict_cast<T>(value)) {}
- // Should fail only on attempting to assign NaN to a saturated integer.
- case internal::RANGE_INVALID:
- UNREACHABLE();
+ // If you got here from a compiler error, it's because you tried to assign
+ // from a source type to a destination type that has insufficient range.
+ // The solution may be to change the destination type you're assigning to,
+ // and use one large enough to represent the source.
+ // If you're assigning from a CheckedNumeric<> class, you may be able to use
+ // the AssignIfValid() member function, specify a narrower destination type to
+ // the member value functions (e.g. val.template ValueOrDie<Dst>()), use one
+ // of the value helper functions (e.g. ValueOrDieForType<Dst>(val)).
+ // If you've encountered an _ambiguous overload_ you can use a static_cast<>
+ // to explicitly cast the result to the destination type.
+ // If none of that works, you may be better served with the checked_cast<> or
+ // saturated_cast<> template functions for your particular use case.
+ template <typename Dst, typename std::enable_if<IsNumericRangeContained<
+ Dst, T>::value>::type* = nullptr>
+ constexpr operator Dst() const {
+ return static_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
}
- UNREACHABLE();
+ private:
+ const T value_;
+};
+
+// Convenience wrapper returns a StrictNumeric from the provided arithmetic type.
+template <typename T>
+constexpr StrictNumeric<typename UnderlyingType<T>::type> MakeStrictNum(
+ const T value) {
+ return value;
+}
+
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+// Overload the ostream output operator to make logging work nicely.
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const StrictNumeric<T>& value) {
+ os << static_cast<T>(value);
+ return os;
+}
+#endif
+
+#define BASE_NUMERIC_COMPARISON_OPERATORS(CLASS, NAME, OP) \
+ template <typename L, typename R, \
+ typename std::enable_if< \
+ internal::Is##CLASS##Op<L, R>::value>::type* = nullptr> \
+ constexpr bool operator OP(const L lhs, const R rhs) { \
+ return SafeCompare<NAME, typename UnderlyingType<L>::type, \
+ typename UnderlyingType<R>::type>(lhs, rhs); \
+ }
+
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLess, <)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLessOrEqual, <=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreater, >)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreaterOrEqual, >=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsEqual, ==)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsNotEqual, !=)
+
+} // namespace internal
+
+using internal::as_signed;
+using internal::as_unsigned;
+using internal::checked_cast;
+using internal::IsTypeInRangeForNumericType;
+using internal::IsValueInRangeForNumericType;
+using internal::IsValueNegative;
+using internal::MakeStrictNum;
+using internal::SafeUnsignedAbs;
+using internal::saturated_cast;
+using internal::strict_cast;
+using internal::StrictNumeric;
+
+// Explicitly make a shorter size_t alias for convenience.
+using SizeT = StrictNumeric<size_t>;
+
+// floating -> integral conversions that saturate and thus can actually return
+// an integral type. In most cases, these should be preferred over the std::
+// versions.
+template <typename Dst = int, typename Src,
+ typename = std::enable_if_t<std::is_integral<Dst>::value &&
+ std::is_floating_point<Src>::value>>
+Dst ClampFloor(Src value) {
+ return saturated_cast<Dst>(std::floor(value));
+}
+template <typename Dst = int, typename Src,
+ typename = std::enable_if_t<std::is_integral<Dst>::value &&
+ std::is_floating_point<Src>::value>>
+Dst ClampCeil(Src value) {
+ return saturated_cast<Dst>(std::ceil(value));
+}
+template <typename Dst = int, typename Src,
+ typename = std::enable_if_t<std::is_integral<Dst>::value &&
+ std::is_floating_point<Src>::value>>
+Dst ClampRound(Src value) {
+ const Src rounded =
+ (value >= 0.0f) ? std::floor(value + 0.5f) : std::ceil(value - 0.5f);
+ return saturated_cast<Dst>(rounded);
}
} // namespace base
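Taken together, the header now offers three conversion policies: checked_cast fails out of range, saturated_cast clamps, and ClampFloor/ClampCeil/ClampRound combine rounding with saturation. A minimal sketch of the expected results, based only on the semantics documented above (SafeCastExamples is a hypothetical function):

#include <cstdint>
#include "src/base/safe_conversions.h"

void SafeCastExamples() {
  int8_t a = v8::base::saturated_cast<int8_t>(1000);   // clamps to 127
  uint8_t b = v8::base::saturated_cast<uint8_t>(-5);    // clamps to 0
  uint32_t c = v8::base::checked_cast<uint32_t>(42);    // in range, converts normally
  int d = v8::base::ClampRound(2.5f);                   // rounds to 3
  int e = v8::base::ClampFloor<int>(1e12);              // saturates to INT_MAX
  (void)a; (void)b; (void)c; (void)d; (void)e;
}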
diff --git a/deps/v8/src/base/safe_conversions_arm_impl.h b/deps/v8/src/base/safe_conversions_arm_impl.h
new file mode 100644
index 0000000000..0e08a14405
--- /dev/null
+++ b/deps/v8/src/base/safe_conversions_arm_impl.h
@@ -0,0 +1,60 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2014 the V8 project authors. All rights reserved.
+// List of adaptations:
+// - include guard names
+// - wrap in v8 namespace
+// - include paths
+
+#ifndef V8_BASE_SAFE_CONVERSIONS_ARM_IMPL_H_
+#define V8_BASE_SAFE_CONVERSIONS_ARM_IMPL_H_
+
+#include <cassert>
+#include <limits>
+#include <type_traits>
+
+#include "src/base/safe_conversions_impl.h"
+
+namespace v8 {
+namespace base {
+namespace internal {
+
+// Fast saturation to a destination type.
+template <typename Dst, typename Src>
+struct SaturateFastAsmOp {
+ static constexpr bool is_supported =
+ std::is_signed<Src>::value && std::is_integral<Dst>::value &&
+ std::is_integral<Src>::value &&
+ IntegerBitsPlusSign<Src>::value <= IntegerBitsPlusSign<int32_t>::value &&
+ IntegerBitsPlusSign<Dst>::value <= IntegerBitsPlusSign<int32_t>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value;
+
+ __attribute__((always_inline)) static Dst Do(Src value) {
+ int32_t src = value;
+ typename std::conditional<std::is_signed<Dst>::value, int32_t,
+ uint32_t>::type result;
+ if (std::is_signed<Dst>::value) {
+ asm("ssat %[dst], %[shift], %[src]"
+ : [dst] "=r"(result)
+ : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value <= 32
+ ? IntegerBitsPlusSign<Dst>::value
+ : 32));
+ } else {
+ asm("usat %[dst], %[shift], %[src]"
+ : [dst] "=r"(result)
+ : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value < 32
+ ? IntegerBitsPlusSign<Dst>::value
+ : 31));
+ }
+ return static_cast<Dst>(result);
+ }
+};
+
+} // namespace internal
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_SAFE_CONVERSIONS_ARM_IMPL_H_
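As a portable illustration of what the ssat path computes, here is a hypothetical SignedSaturate helper that clamps a 32-bit value into an N-bit signed range; it is a sketch of the instruction's semantics, not code used by V8:

#include <cstdint>

// Clamp a 32-bit value into the N-bit signed range, i.e. what
// "ssat dst, #N, src" produces. N is assumed to be in [1, 32].
int32_t SignedSaturate(int32_t value, int bits) {
  const int32_t max = (bits >= 32) ? INT32_MAX : (int32_t{1} << (bits - 1)) - 1;
  const int32_t min = -max - 1;
  if (value > max) return max;
  if (value < min) return min;
  return value;
}
// e.g. SignedSaturate(1000, 8) == 127 and SignedSaturate(-1000, 8) == -128,
// matching what saturated_cast<int8_t> yields on Arm.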
diff --git a/deps/v8/src/base/safe_conversions_impl.h b/deps/v8/src/base/safe_conversions_impl.h
index 90c8e19353..5d9277df24 100644
--- a/deps/v8/src/base/safe_conversions_impl.h
+++ b/deps/v8/src/base/safe_conversions_impl.h
@@ -4,28 +4,130 @@
// Slightly adapted for inclusion in V8.
// Copyright 2014 the V8 project authors. All rights reserved.
+// List of adaptations:
+// - include guard names
+// - wrap in v8 namespace
+// - formatting (git cl format)
#ifndef V8_BASE_SAFE_CONVERSIONS_IMPL_H_
#define V8_BASE_SAFE_CONVERSIONS_IMPL_H_
+#include <stdint.h>
+
#include <limits>
+#include <type_traits>
-#include "src/base/logging.h"
-#include "src/base/macros.h"
+#if defined(__GNUC__) || defined(__clang__)
+#define BASE_NUMERICS_LIKELY(x) __builtin_expect(!!(x), 1)
+#define BASE_NUMERICS_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define BASE_NUMERICS_LIKELY(x) (x)
+#define BASE_NUMERICS_UNLIKELY(x) (x)
+#endif
namespace v8 {
namespace base {
namespace internal {
// The std library doesn't provide a binary max_exponent for integers, however
-// we can compute one by adding one to the number of non-sign bits. This allows
-// for accurate range comparisons between floating point and integer types.
+// we can compute an analog using std::numeric_limits<>::digits.
template <typename NumericType>
struct MaxExponent {
- static const int value = std::numeric_limits<NumericType>::is_iec559
+ static const int value = std::is_floating_point<NumericType>::value
? std::numeric_limits<NumericType>::max_exponent
- : (sizeof(NumericType) * 8 + 1 -
- std::numeric_limits<NumericType>::is_signed);
+ : std::numeric_limits<NumericType>::digits + 1;
+};
+
+// The number of bits (including the sign) in an integer. Eliminates sizeof
+// hacks.
+template <typename NumericType>
+struct IntegerBitsPlusSign {
+ static const int value = std::numeric_limits<NumericType>::digits +
+ std::is_signed<NumericType>::value;
+};
+
+// Helper templates for integer manipulations.
+
+template <typename Integer>
+struct PositionOfSignBit {
+ static const size_t value = IntegerBitsPlusSign<Integer>::value - 1;
+};
+
+// Determines if a numeric value is negative without throwing compiler
+// warnings on: unsigned(value) < 0.
+template <typename T,
+ typename std::enable_if<std::is_signed<T>::value>::type* = nullptr>
+constexpr bool IsValueNegative(T value) {
+ static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+ return value < 0;
+}
+
+template <typename T,
+ typename std::enable_if<!std::is_signed<T>::value>::type* = nullptr>
+constexpr bool IsValueNegative(T) {
+ static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+ return false;
+}
+
+// This performs a fast negation, returning a signed value. It works on unsigned
+// arguments, but probably doesn't do what you want for any unsigned value
+// larger than max / 2 + 1 (i.e. signed min cast to unsigned).
+template <typename T>
+constexpr typename std::make_signed<T>::type ConditionalNegate(
+ T x, bool is_negative) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ using SignedT = typename std::make_signed<T>::type;
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ return static_cast<SignedT>(
+ (static_cast<UnsignedT>(x) ^ -SignedT(is_negative)) + is_negative);
+}
+
+// This performs a safe, absolute value via unsigned overflow.
+template <typename T>
+constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ return IsValueNegative(value)
+ ? static_cast<UnsignedT>(0u - static_cast<UnsignedT>(value))
+ : static_cast<UnsignedT>(value);
+}
+
+// This allows us to switch paths on known compile-time constants.
+#if defined(__clang__) || defined(__GNUC__)
+constexpr bool CanDetectCompileTimeConstant() { return true; }
+template <typename T>
+constexpr bool IsCompileTimeConstant(const T v) {
+ return __builtin_constant_p(v);
+}
+#else
+constexpr bool CanDetectCompileTimeConstant() { return false; }
+template <typename T>
+constexpr bool IsCompileTimeConstant(const T) {
+ return false;
+}
+#endif
+template <typename T>
+constexpr bool MustTreatAsConstexpr(const T v) {
+ // Either we can't detect a compile-time constant, and must always use the
+ // constexpr path, or we know we have a compile-time constant.
+ return !CanDetectCompileTimeConstant() || IsCompileTimeConstant(v);
+}
+
+// Forces a crash, like a CHECK(false). Used for numeric boundary errors.
+// Also used in a constexpr template to trigger a compilation failure on
+// an error condition.
+struct CheckOnFailure {
+ template <typename T>
+ static T HandleFailure() {
+#if defined(_MSC_VER)
+ __debugbreak();
+#elif defined(__GNUC__) || defined(__clang__)
+ __builtin_trap();
+#else
+ ((void)(*(volatile char*)0 = 0));
+#endif
+ return T();
+ }
};
enum IntegerRepresentation {
@@ -35,7 +137,7 @@ enum IntegerRepresentation {
// A range for a given numeric Src type is contained for a given numeric Dst
// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
-// numeric_limits<Src>::min() >= numeric_limits<Dst>::min() are true.
+// numeric_limits<Src>::lowest() >= numeric_limits<Dst>::lowest() are true.
// We implement this as template specializations rather than simple static
// comparisons to ensure type correctness in our comparisons.
enum NumericRangeRepresentation {
@@ -46,16 +148,13 @@ enum NumericRangeRepresentation {
// Helper templates to statically determine if our destination type can contain
// maximum and minimum values represented by the source type.
-template <
- typename Dst,
- typename Src,
- IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED,
- IntegerRepresentation SrcSign =
- std::numeric_limits<Src>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED >
+template <typename Dst, typename Src,
+ IntegerRepresentation DstSign = std::is_signed<Dst>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::is_signed<Src>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED>
struct StaticDstRangeRelationToSrcRange;
// Same sign: Dst is guaranteed to contain Src only if its range is equal or
@@ -90,127 +189,630 @@ struct StaticDstRangeRelationToSrcRange<Dst,
static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
};
-enum RangeConstraint {
- RANGE_VALID = 0x0, // Value can be represented by the destination type.
- RANGE_UNDERFLOW = 0x1, // Value would underflow.
- RANGE_OVERFLOW = 0x2, // Value would overflow.
- RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW // Invalid (i.e. NaN).
+// This class wraps the range constraints as separate booleans so the compiler
+// can identify constants and eliminate unused code paths.
+class RangeCheck {
+ public:
+ constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
+ : is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound) {}
+ constexpr RangeCheck() : is_underflow_(0), is_overflow_(0) {}
+ constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
+ constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
+ constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
+ constexpr bool IsUnderflow() const { return !is_overflow_ && is_underflow_; }
+ constexpr bool IsOverflowFlagSet() const { return is_overflow_; }
+ constexpr bool IsUnderflowFlagSet() const { return is_underflow_; }
+ constexpr bool operator==(const RangeCheck rhs) const {
+ return is_underflow_ == rhs.is_underflow_ &&
+ is_overflow_ == rhs.is_overflow_;
+ }
+ constexpr bool operator!=(const RangeCheck rhs) const {
+ return !(*this == rhs);
+ }
+
+ private:
+ // Do not change the order of these member variables. The integral conversion
+ // optimization depends on this exact order.
+ const bool is_underflow_;
+ const bool is_overflow_;
};
-// Helper function for coercing an int back to a RangeConstraint.
-inline RangeConstraint GetRangeConstraint(int integer_range_constraint) {
- DCHECK(integer_range_constraint >= RANGE_VALID &&
- integer_range_constraint <= RANGE_INVALID);
- return static_cast<RangeConstraint>(integer_range_constraint);
-}
+// The following helper template addresses a corner case in range checks for
+// conversion from a floating-point type to an integral type of smaller range
+// but larger precision (e.g. float -> unsigned). The problem is as follows:
+// 1. Integral maximum is always one less than a power of two, so it must be
+// truncated to fit the mantissa of the floating point. The direction of
+// rounding is implementation defined, but by default it's always IEEE
+// floats, which round to nearest and thus result in a value of larger
+// magnitude than the integral value.
+// Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
+// // is 4294967295u.
+// 2. If the floating point value is equal to the promoted integral maximum
+// value, a range check will erroneously pass.
+// Example: (4294967296f <= 4294967295u) // This is true due to a precision
+// // loss in rounding up to float.
+// 3. When the floating point value is then converted to an integral, the
+// resulting value is out of range for the target integral type and
+// thus is implementation defined.
+// Example: unsigned u = (float)INT_MAX; // u will typically overflow to 0.
+// To fix this bug we manually truncate the maximum value when the destination
+// type is an integral of larger precision than the source floating-point type,
+// such that the resulting maximum is represented exactly as a floating point.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct NarrowingRange {
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = typename std::numeric_limits<Dst>;
-// This function creates a RangeConstraint from an upper and lower bound
-// check by taking advantage of the fact that only NaN can be out of range in
-// both directions at once.
-inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
- bool is_in_lower_bound) {
- return GetRangeConstraint((is_in_upper_bound ? 0 : RANGE_OVERFLOW) |
- (is_in_lower_bound ? 0 : RANGE_UNDERFLOW));
-}
+ // Computes the mask required to make an accurate comparison between types.
+ static const int kShift =
+ (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
+ SrcLimits::digits < DstLimits::digits)
+ ? (DstLimits::digits - SrcLimits::digits)
+ : 0;
+ template <typename T, typename std::enable_if<
+ std::is_integral<T>::value>::type* = nullptr>
-template <
- typename Dst,
- typename Src,
- IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED,
- IntegerRepresentation SrcSign = std::numeric_limits<Src>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED,
- NumericRangeRepresentation DstRange =
- StaticDstRangeRelationToSrcRange<Dst, Src>::value >
+ // Masks out the integer bits that are beyond the precision of the
+ // intermediate type used for comparison.
+ static constexpr T Adjust(T value) {
+ static_assert(std::is_same<T, Dst>::value, "");
+ static_assert(kShift < DstLimits::digits, "");
+ return static_cast<T>(
+ ConditionalNegate(SafeUnsignedAbs(value) & ~((T(1) << kShift) - T(1)),
+ IsValueNegative(value)));
+ }
+
+ template <typename T, typename std::enable_if<
+ std::is_floating_point<T>::value>::type* = nullptr>
+ static constexpr T Adjust(T value) {
+ static_assert(std::is_same<T, Dst>::value, "");
+ static_assert(kShift == 0, "");
+ return value;
+ }
+
+ static constexpr Dst max() { return Adjust(Bounds<Dst>::max()); }
+ static constexpr Dst lowest() { return Adjust(Bounds<Dst>::lowest()); }
+};
+
+template <typename Dst, typename Src, template <typename> class Bounds,
+ IntegerRepresentation DstSign = std::is_signed<Dst>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::is_signed<Src>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ NumericRangeRepresentation DstRange =
+ StaticDstRangeRelationToSrcRange<Dst, Src>::value>
struct DstRangeRelationToSrcRangeImpl;
// The following templates are for ranges that must be verified at runtime. We
// split it into checks based on signedness to avoid confusing casts and
// compiler warnings on signed and unsigned comparisons.
-// Dst range is statically determined to contain Src: Nothing to check.
-template <typename Dst,
- typename Src,
- IntegerRepresentation DstSign,
- IntegerRepresentation SrcSign>
-struct DstRangeRelationToSrcRangeImpl<Dst,
- Src,
- DstSign,
- SrcSign,
+// Same sign narrowing: The range is contained for normal limits.
+template <typename Dst, typename Src, template <typename> class Bounds,
+ IntegerRepresentation DstSign, IntegerRepresentation SrcSign>
+struct DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds, DstSign, SrcSign,
NUMERIC_RANGE_CONTAINED> {
- static RangeConstraint Check(Src value) { return RANGE_VALID; }
+ static constexpr RangeCheck Check(Src value) {
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(
+ static_cast<Dst>(SrcLimits::lowest()) >= DstLimits::lowest() ||
+ static_cast<Dst>(value) >= DstLimits::lowest(),
+ static_cast<Dst>(SrcLimits::max()) <= DstLimits::max() ||
+ static_cast<Dst>(value) <= DstLimits::max());
+ }
};
// Signed to signed narrowing: Both the upper and lower boundaries may be
-// exceeded.
-template <typename Dst, typename Src>
-struct DstRangeRelationToSrcRangeImpl<Dst,
- Src,
- INTEGER_REPRESENTATION_SIGNED,
- INTEGER_REPRESENTATION_SIGNED,
- NUMERIC_RANGE_NOT_CONTAINED> {
- static RangeConstraint Check(Src value) {
- return std::numeric_limits<Dst>::is_iec559
- ? GetRangeConstraint(value <= std::numeric_limits<Dst>::max(),
- value >= -std::numeric_limits<Dst>::max())
- : GetRangeConstraint(value <= std::numeric_limits<Dst>::max(),
- value >= std::numeric_limits<Dst>::min());
+// exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<
+ Dst, Src, Bounds, INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_SIGNED, NUMERIC_RANGE_NOT_CONTAINED> {
+ static constexpr RangeCheck Check(Src value) {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(value >= DstLimits::lowest(), value <= DstLimits::max());
}
};
-// Unsigned to unsigned narrowing: Only the upper boundary can be exceeded.
-template <typename Dst, typename Src>
-struct DstRangeRelationToSrcRangeImpl<Dst,
- Src,
- INTEGER_REPRESENTATION_UNSIGNED,
- INTEGER_REPRESENTATION_UNSIGNED,
- NUMERIC_RANGE_NOT_CONTAINED> {
- static RangeConstraint Check(Src value) {
- return GetRangeConstraint(value <= std::numeric_limits<Dst>::max(), true);
+// Unsigned to unsigned narrowing: Only the upper bound can be exceeded for
+// standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<
+ Dst, Src, Bounds, INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED, NUMERIC_RANGE_NOT_CONTAINED> {
+ static constexpr RangeCheck Check(Src value) {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(
+ DstLimits::lowest() == Dst(0) || value >= DstLimits::lowest(),
+ value <= DstLimits::max());
}
};
-// Unsigned to signed: The upper boundary may be exceeded.
-template <typename Dst, typename Src>
-struct DstRangeRelationToSrcRangeImpl<Dst,
- Src,
- INTEGER_REPRESENTATION_SIGNED,
- INTEGER_REPRESENTATION_UNSIGNED,
- NUMERIC_RANGE_NOT_CONTAINED> {
- static RangeConstraint Check(Src value) {
- return sizeof(Dst) > sizeof(Src)
- ? RANGE_VALID
- : GetRangeConstraint(
- value <= static_cast<Src>(std::numeric_limits<Dst>::max()),
- true);
+// Unsigned to signed: Only the upper bound can be exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<
+ Dst, Src, Bounds, INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED, NUMERIC_RANGE_NOT_CONTAINED> {
+ static constexpr RangeCheck Check(Src value) {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ using Promotion = decltype(Src() + Dst());
+ return RangeCheck(DstLimits::lowest() <= Dst(0) ||
+ static_cast<Promotion>(value) >=
+ static_cast<Promotion>(DstLimits::lowest()),
+ static_cast<Promotion>(value) <=
+ static_cast<Promotion>(DstLimits::max()));
}
};
// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
-// and any negative value exceeds the lower boundary.
+// and any negative value exceeds the lower boundary for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<
+ Dst, Src, Bounds, INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED, NUMERIC_RANGE_NOT_CONTAINED> {
+ static constexpr RangeCheck Check(Src value) {
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ using Promotion = decltype(Src() + Dst());
+ bool ge_zero = false;
+    // Converting floating-point to integer discards the fractional part, so
+    // values in (-1.0, -0.0) truncate to 0 and fit in Dst.
+ if (std::is_floating_point<Src>::value) {
+ ge_zero = value > Src(-1);
+ } else {
+ ge_zero = value >= Src(0);
+ }
+ return RangeCheck(
+ ge_zero && (DstLimits::lowest() == 0 ||
+ static_cast<Dst>(value) >= DstLimits::lowest()),
+ static_cast<Promotion>(SrcLimits::max()) <=
+ static_cast<Promotion>(DstLimits::max()) ||
+ static_cast<Promotion>(value) <=
+ static_cast<Promotion>(DstLimits::max()));
+ }
+};
+
+// Simple wrapper for statically checking if a type's range is contained.
template <typename Dst, typename Src>
-struct DstRangeRelationToSrcRangeImpl<Dst,
- Src,
- INTEGER_REPRESENTATION_UNSIGNED,
- INTEGER_REPRESENTATION_SIGNED,
- NUMERIC_RANGE_NOT_CONTAINED> {
- static RangeConstraint Check(Src value) {
- return (MaxExponent<Dst>::value >= MaxExponent<Src>::value)
- ? GetRangeConstraint(true, value >= static_cast<Src>(0))
- : GetRangeConstraint(
- value <= static_cast<Src>(std::numeric_limits<Dst>::max()),
- value >= static_cast<Src>(0));
+struct IsTypeInRangeForNumericType {
+ static const bool value = StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
+ NUMERIC_RANGE_CONTAINED;
+};
+
+template <typename Dst, template <typename> class Bounds = std::numeric_limits,
+ typename Src>
+constexpr RangeCheck DstRangeRelationToSrcRange(Src value) {
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+ static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
+ return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
+}
+
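DstRangeRelationToSrcRange above is the runtime entry point for these specializations. A rough standalone sketch of the signed-to-signed narrowing case, reporting the same pair of bound checks (illustrative names, not the patch's API):

#include <cstdint>
#include <cstdio>
#include <limits>

struct SimpleRangeCheck {
  bool in_lower_bound;  // value >= lowest int8_t
  bool in_upper_bound;  // value <= largest int8_t
};

SimpleRangeCheck CheckIntToInt8(int value) {
  return {value >= std::numeric_limits<int8_t>::lowest(),
          value <= std::numeric_limits<int8_t>::max()};
}

int main() {
  SimpleRangeCheck a = CheckIntToInt8(200);   // overflows: {1, 0}
  SimpleRangeCheck b = CheckIntToInt8(-300);  // underflows: {0, 1}
  std::printf("%d%d %d%d\n", a.in_lower_bound, a.in_upper_bound,
              b.in_lower_bound, b.in_upper_bound);
}
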
+// Integer promotion templates used by the portable checked integer arithmetic.
+template <size_t Size, bool IsSigned>
+struct IntegerForDigitsAndSign;
+
+#define INTEGER_FOR_DIGITS_AND_SIGN(I) \
+ template <> \
+ struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, \
+ std::is_signed<I>::value> { \
+ using type = I; \
+ }
+
+INTEGER_FOR_DIGITS_AND_SIGN(int8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int64_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint64_t);
+#undef INTEGER_FOR_DIGITS_AND_SIGN
+
+// WARNING: There is no IntegerForDigitsAndSign entry for 128-bit integers. If
+// we ever add one to support 128-bit math, then the promotion templates below
+// will need to be updated (or more likely replaced with a decltype expression).
+static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
+ "Max integer size not supported for this toolchain.");
+
+template <typename Integer, bool IsSigned = std::is_signed<Integer>::value>
+struct TwiceWiderInteger {
+ using type =
+ typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2,
+ IsSigned>::type;
+};
+
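TwiceWiderInteger re-keys the table above with double the bit count and the requested signedness. A standalone sketch of the same lookup, keyed on plain width in bits for simplicity (not part of the patch):

#include <cstddef>
#include <cstdint>
#include <type_traits>

template <std::size_t Bits, bool IsSigned> struct IntFor;
template <> struct IntFor<32, true>  { using type = int32_t;  };
template <> struct IntFor<64, true>  { using type = int64_t;  };
template <> struct IntFor<32, false> { using type = uint32_t; };
template <> struct IntFor<64, false> { using type = uint64_t; };

template <typename T>
using TwiceAsWide =
    typename IntFor<sizeof(T) * 16, std::is_signed<T>::value>::type;

static_assert(std::is_same<TwiceAsWide<int16_t>, int32_t>::value, "");
static_assert(std::is_same<TwiceAsWide<uint32_t>, uint64_t>::value, "");
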
+enum ArithmeticPromotionCategory {
+ LEFT_PROMOTION, // Use the type of the left-hand argument.
+ RIGHT_PROMOTION // Use the type of the right-hand argument.
+};
+
+// Determines the type that can represent the largest positive value.
+template <typename Lhs, typename Rhs,
+ ArithmeticPromotionCategory Promotion =
+ (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
+ ? LEFT_PROMOTION
+ : RIGHT_PROMOTION>
+struct MaxExponentPromotion;
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION> {
+ using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+ using type = Rhs;
+};
+
+// Determines the type that can represent the lowest arithmetic value.
+template <typename Lhs, typename Rhs,
+ ArithmeticPromotionCategory Promotion =
+ std::is_signed<Lhs>::value
+ ? (std::is_signed<Rhs>::value
+ ? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value
+ ? LEFT_PROMOTION
+ : RIGHT_PROMOTION)
+ : LEFT_PROMOTION)
+ : (std::is_signed<Rhs>::value
+ ? RIGHT_PROMOTION
+ : (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value
+ ? LEFT_PROMOTION
+ : RIGHT_PROMOTION))>
+struct LowestValuePromotion;
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, LEFT_PROMOTION> {
+ using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+ using type = Rhs;
+};
+
+// Determines the type that is best able to represent an arithmetic result.
+template <
+ typename Lhs, typename Rhs = Lhs,
+ bool is_intmax_type =
+ std::is_integral<typename MaxExponentPromotion<Lhs, Rhs>::type>::value&&
+ IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
+ value == IntegerBitsPlusSign<intmax_t>::value,
+ bool is_max_exponent =
+ StaticDstRangeRelationToSrcRange<
+ typename MaxExponentPromotion<Lhs, Rhs>::type, Lhs>::value ==
+ NUMERIC_RANGE_CONTAINED&& StaticDstRangeRelationToSrcRange<
+ typename MaxExponentPromotion<Lhs, Rhs>::type, Rhs>::value ==
+ NUMERIC_RANGE_CONTAINED>
+struct BigEnoughPromotion;
+
+// The side with the max exponent is big enough.
+template <typename Lhs, typename Rhs, bool is_intmax_type>
+struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true> {
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = true;
+};
+
+// We can use a twice wider type to fit.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, false, false> {
+ using type =
+ typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+ std::is_signed<Lhs>::value ||
+ std::is_signed<Rhs>::value>::type;
+ static const bool is_contained = true;
+};
+
+// No type is large enough.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, true, false> {
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = false;
+};
+
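Two standard-library facts make the three cases above concrete; this is a standalone check of the containment reasoning, not an instantiation of the patch's templates:

#include <cstdint>
#include <limits>

// Mixed-sign 32-bit operands: int64_t contains both, so the promotion can
// widen and report is_contained == true.
static_assert(std::numeric_limits<int64_t>::max() >=
                      std::numeric_limits<uint32_t>::max() &&
                  std::numeric_limits<int64_t>::lowest() <=
                      std::numeric_limits<int32_t>::lowest(),
              "int64_t contains int32_t and uint32_t");

// Mixed-sign 64-bit operands: no standard integer is wider, so the promotion
// reports is_contained == false.
static_assert(std::numeric_limits<int64_t>::max() <
                  std::numeric_limits<uint64_t>::max(),
              "no 64-bit signed type contains uint64_t");
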
+// We can statically check if operations on the provided types can wrap, so we
+// can skip the checked operations if they're not needed. For an integer, we
+// care whether the destination type preserves the sign and is at least twice
+// the width of the source.
+template <typename T, typename Lhs, typename Rhs = Lhs>
+struct IsIntegerArithmeticSafe {
+ static const bool value =
+ !std::is_floating_point<T>::value &&
+ !std::is_floating_point<Lhs>::value &&
+ !std::is_floating_point<Rhs>::value &&
+ std::is_signed<T>::value >= std::is_signed<Lhs>::value &&
+ IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
+ std::is_signed<T>::value >= std::is_signed<Rhs>::value &&
+ IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
+};
+
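A standalone sketch of why a twice-wide signed type counts as arithmetic safe for narrower operands (illustrative, not part of the patch): the wide result cannot overflow, so no per-operation check is needed.

#include <cstdint>

int64_t AddWithoutChecks(int32_t a, int32_t b) {
  // Worst case is INT32_MIN + INT32_MIN = -2^32, far inside int64_t's range.
  return static_cast<int64_t>(a) + static_cast<int64_t>(b);
}
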
+// Promotes to a type that can represent any possible result of a binary
+// arithmetic operation with the source types.
+template <typename Lhs, typename Rhs,
+ bool is_promotion_possible = IsIntegerArithmeticSafe<
+ typename std::conditional<std::is_signed<Lhs>::value ||
+ std::is_signed<Rhs>::value,
+ intmax_t, uintmax_t>::type,
+ typename MaxExponentPromotion<Lhs, Rhs>::type>::value>
+struct FastIntegerArithmeticPromotion;
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, true> {
+ using type =
+ typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+ std::is_signed<Lhs>::value ||
+ std::is_signed<Rhs>::value>::type;
+ static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
+ static const bool is_contained = true;
+};
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, false> {
+ using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = false;
+};
+
+// Extracts the underlying type from an enum.
+template <typename T, bool is_enum = std::is_enum<T>::value>
+struct ArithmeticOrUnderlyingEnum;
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, true> {
+ using type = typename std::underlying_type<T>::type;
+ static const bool value = std::is_arithmetic<type>::value;
+};
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, false> {
+ using type = T;
+ static const bool value = std::is_arithmetic<type>::value;
+};
+
+// The following are helper templates used in the CheckedNumeric class.
+template <typename T>
+class CheckedNumeric;
+
+template <typename T>
+class ClampedNumeric;
+
+template <typename T>
+class StrictNumeric;
+
+// Used to treat CheckedNumeric and arithmetic underlying types the same.
+template <typename T>
+struct UnderlyingType {
+ using type = typename ArithmeticOrUnderlyingEnum<T>::type;
+ static const bool is_numeric = std::is_arithmetic<type>::value;
+ static const bool is_checked = false;
+ static const bool is_clamped = false;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<CheckedNumeric<T>> {
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = true;
+ static const bool is_clamped = false;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<ClampedNumeric<T>> {
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = false;
+ static const bool is_clamped = true;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<StrictNumeric<T>> {
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = false;
+ static const bool is_clamped = false;
+ static const bool is_strict = true;
+};
+
+template <typename L, typename R>
+struct IsCheckedOp {
+ static const bool value =
+ UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+template <typename L, typename R>
+struct IsClampedOp {
+ static const bool value =
+ UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped) &&
+ !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+template <typename L, typename R>
+struct IsStrictOp {
+ static const bool value =
+ UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_strict || UnderlyingType<R>::is_strict) &&
+ !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked) &&
+ !(UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped);
+};
+
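A standalone model of the precedence these three traits encode: a checked operand selects the checked path, otherwise a clamped operand selects the clamped path, otherwise strict. The enum and function below are illustrative only, not the patch's types:

enum class Kind { kPlain, kChecked, kClamped, kStrict };

constexpr Kind CombinedKind(Kind l, Kind r) {
  return (l == Kind::kChecked || r == Kind::kChecked)   ? Kind::kChecked
         : (l == Kind::kClamped || r == Kind::kClamped) ? Kind::kClamped
         : (l == Kind::kStrict || r == Kind::kStrict)   ? Kind::kStrict
                                                        : Kind::kPlain;
}

static_assert(CombinedKind(Kind::kChecked, Kind::kClamped) == Kind::kChecked,
              "a checked operand wins over a clamped one");
static_assert(CombinedKind(Kind::kClamped, Kind::kStrict) == Kind::kClamped,
              "a clamped operand wins over a strict one");
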
+// as_signed<> returns the supplied integral value (or integral castable
+// Numeric template) cast as a signed integral of equivalent precision.
+// I.e. it's mostly an alias for: static_cast<std::make_signed<T>::type>(t)
+template <typename Src>
+constexpr typename std::make_signed<
+ typename base::internal::UnderlyingType<Src>::type>::type
+as_signed(const Src value) {
+ static_assert(std::is_integral<decltype(as_signed(value))>::value,
+ "Argument must be a signed or unsigned integer type.");
+ return static_cast<decltype(as_signed(value))>(value);
+}
+
+// as_unsigned<> returns the supplied integral value (or integral castable
+// Numeric template) cast as an unsigned integral of equivalent precision.
+// I.e. it's mostly an alias for: static_cast<std::make_unsigned<T>::type>(t)
+template <typename Src>
+constexpr typename std::make_unsigned<
+ typename base::internal::UnderlyingType<Src>::type>::type
+as_unsigned(const Src value) {
+ static_assert(std::is_integral<decltype(as_unsigned(value))>::value,
+ "Argument must be a signed or unsigned integer type.");
+ return static_cast<decltype(as_unsigned(value))>(value);
+}
+
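A standalone sketch of the cast these helpers perform, reduced to std::make_unsigned (illustrative, not the patch's as_unsigned):

#include <cstdint>
#include <type_traits>

template <typename T>
constexpr typename std::make_unsigned<T>::type AsUnsignedSketch(T value) {
  return static_cast<typename std::make_unsigned<T>::type>(value);
}

static_assert(AsUnsignedSketch(int8_t{-1}) == uint8_t{255},
              "-1 maps to the all-ones bit pattern of the same width");
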
+template <typename L, typename R>
+constexpr bool IsLessImpl(const L lhs, const R rhs, const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsUnderflow() || r_range.IsOverflow() ||
+ (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) <
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLess {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsLessOrEqualImpl(const L lhs, const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsUnderflow() || r_range.IsOverflow() ||
+ (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) <=
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLessOrEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterImpl(const L lhs, const R rhs, const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsOverflow() || r_range.IsUnderflow() ||
+ (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) >
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreater {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
}
};
+template <typename L, typename R>
+constexpr bool IsGreaterOrEqualImpl(const L lhs, const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsOverflow() || r_range.IsUnderflow() ||
+ (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) >=
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreaterOrEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+struct IsEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return DstRangeRelationToSrcRange<R>(lhs) ==
+ DstRangeRelationToSrcRange<L>(rhs) &&
+ static_cast<decltype(lhs + rhs)>(lhs) ==
+ static_cast<decltype(lhs + rhs)>(rhs);
+ }
+};
+
+template <typename L, typename R>
+struct IsNotEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return DstRangeRelationToSrcRange<R>(lhs) !=
+ DstRangeRelationToSrcRange<L>(rhs) ||
+ static_cast<decltype(lhs + rhs)>(lhs) !=
+ static_cast<decltype(lhs + rhs)>(rhs);
+ }
+};
+
+// SafeCompare dispatches to the comparison templates above, promoting both
+// operands to a single big-enough type when one exists.
+template <template <typename, typename> class C, typename L, typename R>
+constexpr bool SafeCompare(const L lhs, const R rhs) {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ using Promotion = BigEnoughPromotion<L, R>;
+ using BigType = typename Promotion::type;
+ return Promotion::is_contained
+ // Force to a larger type for speed if both are contained.
+ ? C<BigType, BigType>::Test(
+ static_cast<BigType>(static_cast<L>(lhs)),
+ static_cast<BigType>(static_cast<R>(rhs)))
+ // Let the template functions figure it out for mixed types.
+ : C<L, R>::Test(lhs, rhs);
+}
+
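The reason comparisons are routed through BigEnoughPromotion is visible with plain literals; a standalone illustration (most compilers warn on the first assertion, which is exactly the pitfall being avoided):

#include <cstdint>

static_assert((-1 < 1u) == false,
              "usual arithmetic conversions turn -1 into a huge unsigned value");
static_assert(static_cast<int64_t>(-1) < static_cast<int64_t>(1u),
              "promoting both sides to int64_t preserves the real ordering");
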
+template <typename Dst, typename Src>
+constexpr bool IsMaxInRangeForNumericType() {
+ return IsGreaterOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::max(),
+ std::numeric_limits<Src>::max());
+}
+
template <typename Dst, typename Src>
-inline RangeConstraint DstRangeRelationToSrcRange(Src value) {
- // Both source and destination must be numeric.
- STATIC_ASSERT(std::numeric_limits<Src>::is_specialized);
- STATIC_ASSERT(std::numeric_limits<Dst>::is_specialized);
- return DstRangeRelationToSrcRangeImpl<Dst, Src>::Check(value);
+constexpr bool IsMinInRangeForNumericType() {
+ return IsLessOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::lowest(),
+ std::numeric_limits<Src>::lowest());
+}
+
+template <typename Dst, typename Src>
+constexpr Dst CommonMax() {
+ return !IsMaxInRangeForNumericType<Dst, Src>()
+ ? Dst(std::numeric_limits<Dst>::max())
+ : Dst(std::numeric_limits<Src>::max());
+}
+
+template <typename Dst, typename Src>
+constexpr Dst CommonMin() {
+ return !IsMinInRangeForNumericType<Dst, Src>()
+ ? Dst(std::numeric_limits<Dst>::lowest())
+ : Dst(std::numeric_limits<Src>::lowest());
+}
+
+// This is a wrapper that returns the max or min for a supplied type.
+// If the argument is false, the returned value is the maximum. If true, the
+// returned value is the minimum.
+template <typename Dst, typename Src = Dst>
+constexpr Dst CommonMaxOrMin(bool is_min) {
+ return is_min ? CommonMin<Dst, Src>() : CommonMax<Dst, Src>();
}
} // namespace internal
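For reference, CommonMax/CommonMin above compute the saturation bounds used by the saturating conversions built on top of this header: the smaller maximum (or larger lowest) of the two types, expressed in the destination type. A standalone sketch of the max case, using a plain comparison where the patch uses its own safe one (illustrative only):

#include <cstdint>
#include <limits>

template <typename Dst, typename Src>
constexpr Dst CommonMaxSketch() {
  return std::numeric_limits<Dst>::max() >= std::numeric_limits<Src>::max()
             ? static_cast<Dst>(std::numeric_limits<Src>::max())
             : std::numeric_limits<Dst>::max();
}

static_assert(CommonMaxSketch<int64_t, uint16_t>() == 65535, "Src is smaller");
static_assert(CommonMaxSketch<uint16_t, int64_t>() == 65535, "Dst is smaller");
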
diff --git a/deps/v8/src/base/threaded-list.h b/deps/v8/src/base/threaded-list.h
index f0eed52ede..91c726474e 100644
--- a/deps/v8/src/base/threaded-list.h
+++ b/deps/v8/src/base/threaded-list.h
@@ -29,6 +29,9 @@ template <typename T, typename BaseClass,
class ThreadedListBase final : public BaseClass {
public:
ThreadedListBase() : head_(nullptr), tail_(&head_) {}
+ ThreadedListBase(const ThreadedListBase&) = delete;
+ ThreadedListBase& operator=(const ThreadedListBase&) = delete;
+
void Add(T* v) {
DCHECK_NULL(*tail_);
DCHECK_NULL(*TLTraits::next(v));
@@ -253,7 +256,6 @@ class ThreadedListBase final : public BaseClass {
private:
T* head_;
T** tail_;
- DISALLOW_COPY_AND_ASSIGN(ThreadedListBase);
};
struct EmptyBase {};
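The threaded-list change above swaps the DISALLOW_COPY_AND_ASSIGN macro in the private section for explicitly deleted members in the public section; a minimal sketch of the idiom (illustrative class name):

class NonCopyable {
 public:
  NonCopyable() = default;
  NonCopyable(const NonCopyable&) = delete;
  NonCopyable& operator=(const NonCopyable&) = delete;
};

// NonCopyable a;
// NonCopyable b = a;  // error: call to deleted copy constructor
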
diff --git a/deps/v8/src/builtins/DIR_METADATA b/deps/v8/src/builtins/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/builtins/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+} \ No newline at end of file
diff --git a/deps/v8/src/builtins/OWNERS b/deps/v8/src/builtins/OWNERS
index f52e1c9ca8..48d72aea5e 100644
--- a/deps/v8/src/builtins/OWNERS
+++ b/deps/v8/src/builtins/OWNERS
@@ -1,3 +1 @@
file:../../COMMON_OWNERS
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc
index 4258b07a7b..eea53bca09 100644
--- a/deps/v8/src/builtins/accessors.cc
+++ b/deps/v8/src/builtins/accessors.cc
@@ -81,12 +81,13 @@ bool Accessors::IsJSObjectFieldAccessor(Isolate* isolate, Handle<Map> map,
}
V8_WARN_UNUSED_RESULT MaybeHandle<Object>
-Accessors::ReplaceAccessorWithDataProperty(Handle<Object> receiver,
+Accessors::ReplaceAccessorWithDataProperty(Isolate* isolate,
+ Handle<Object> receiver,
Handle<JSObject> holder,
Handle<Name> name,
Handle<Object> value) {
- LookupIterator it(holder->GetIsolate(), receiver, name, holder,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
+ LookupIterator it(isolate, receiver, LookupIterator::Key(isolate, name),
+ holder, LookupIterator::OWN_SKIP_INTERCEPTOR);
// Skip any access checks we might hit. This accessor should never hit in a
// situation where the caller does not have access.
if (it.state() == LookupIterator::ACCESS_CHECK) {
@@ -114,8 +115,8 @@ void Accessors::ReconfigureToDataProperty(
Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Name> name = Utils::OpenHandle(*key);
Handle<Object> value = Utils::OpenHandle(*val);
- MaybeHandle<Object> result =
- Accessors::ReplaceAccessorWithDataProperty(receiver, holder, name, value);
+ MaybeHandle<Object> result = Accessors::ReplaceAccessorWithDataProperty(
+ isolate, receiver, holder, name, value);
if (result.is_null()) {
isolate->OptionalRescheduleException(false);
} else {
diff --git a/deps/v8/src/builtins/accessors.h b/deps/v8/src/builtins/accessors.h
index faee0d9b67..7bc8075e55 100644
--- a/deps/v8/src/builtins/accessors.h
+++ b/deps/v8/src/builtins/accessors.h
@@ -102,8 +102,8 @@ class Accessors : public AllStatic {
FieldIndex* field_index);
static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
- Handle<Object> receiver, Handle<JSObject> holder, Handle<Name> name,
- Handle<Object> value);
+ Isolate* isolate, Handle<Object> receiver, Handle<JSObject> holder,
+ Handle<Name> name, Handle<Object> value);
// Create an AccessorInfo. The setter is optional (can be nullptr).
//
diff --git a/deps/v8/src/builtins/aggregate-error.tq b/deps/v8/src/builtins/aggregate-error.tq
index 0f4a47b3e7..9c70ffcb00 100644
--- a/deps/v8/src/builtins/aggregate-error.tq
+++ b/deps/v8/src/builtins/aggregate-error.tq
@@ -9,10 +9,6 @@ namespace error {
transitioning javascript builtin AggregateErrorConstructor(
js-implicit context: NativeContext, target: JSFunction,
newTarget: JSAny)(...arguments): JSAny {
- // This function is implementing the spec as suggested by
- // https://github.com/tc39/proposal-promise-any/pull/59 . FIXME(marja):
- // change this if the PR is declined, otherwise remove the comment.
-
// 1. If NewTarget is undefined, let newTarget be the active function
// object, else let newTarget be NewTarget.
// 2. Let O be ? OrdinaryCreateFromConstructor(newTarget,
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index e0a6ee1611..5a0a59d879 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -72,38 +72,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
-
-void LoadStackLimit(MacroAssembler* masm, Register destination,
- StackLimitKind kind) {
- DCHECK(masm->root_array_available());
- Isolate* isolate = masm->isolate();
- ExternalReference limit =
- kind == StackLimitKind::kRealStackLimit
- ? ExternalReference::address_of_real_jslimit(isolate)
- : ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
-
- intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
- CHECK(is_int32(offset));
- __ ldr(destination, MemOperand(kRootRegister, offset));
-}
-
-void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch, Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
- __ b(le, stack_overflow); // Signed comparison.
-}
-
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -118,7 +86,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
Label stack_overflow;
- Generate_StackOverflowCheck(masm, r0, scratch, &stack_overflow);
+ __ StackOverflowCheck(r0, scratch, &stack_overflow);
// Enter a construct frame.
{
@@ -129,7 +97,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(cp, r0);
__ SmiUntag(r0);
-#ifdef V8_REVERSE_JSARGS
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+  // should get the formal parameter count and copy the arguments to their
+  // correct positions (including any undefined), instead of delaying this to
+ // InvokeFunction.
+
// Set up pointer to last argument (skip receiver).
__ add(
r4, fp,
@@ -138,14 +110,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushArray(r4, r0, r5);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-#else
- // The receiver for the builtin/api call.
- __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- // Copy arguments and receiver to the expression stack.
- __ PushArray(r4, r0, r5);
-#endif
// Call the function.
// r0: number of arguments (untagged)
@@ -187,165 +151,155 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
+ FrameScope scope(masm, StackFrame::MANUAL);
// Enter a construct frame.
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
- Label post_instantiation_deopt_entry, not_create_implicit_receiver;
-
- // Preserve the incoming parameters on the stack.
- __ LoadRoot(r4, RootIndex::kTheHoleValue);
- __ SmiTag(r0);
- __ Push(cp, r0, r1, r4, r3);
-
- // ----------- S t a t e -------------
- // -- sp[0*kPointerSize]: new target
- // -- sp[1*kPointerSize]: padding
- // -- r1 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
- // -----------------------------------
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+ __ EnterFrame(StackFrame::CONSTRUCT);
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
- __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r4);
- __ JumpIfIsInRange(r4, kDefaultDerivedConstructor, kDerivedConstructor,
- &not_create_implicit_receiver);
-
- // If not derived class constructor: Allocate the new receiver object.
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
- r4, r5);
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ b(&post_instantiation_deopt_entry);
-
- // Else: use TheHoleValue as receiver for constructor call
- __ bind(&not_create_implicit_receiver);
- __ LoadRoot(r0, RootIndex::kTheHoleValue);
-
- // ----------- S t a t e -------------
- // -- r0: receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
- // -----------------------------------
- // Deoptimizer enters here.
- masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
- masm->pc_offset());
- __ bind(&post_instantiation_deopt_entry);
-
- // Restore new target.
- __ Pop(r3);
-
-#ifdef V8_REVERSE_JSARGS
- // Push the allocated receiver to the stack.
- __ Push(r0);
- // We need two copies because we may have to return the original one
- // and the calling conventions dictate that the called function pops the
- // receiver. The second copy is pushed after the arguments, we saved in r6
- // since r0 needs to store the number of arguments before
- // InvokingFunction.
- __ mov(r6, r0);
-
- // Set up pointer to first argument (skip receiver).
- __ add(
- r4, fp,
- Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
-#else
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(r0, r0);
-
- // Set up pointer to last argument.
- __ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-#endif
+ // Preserve the incoming parameters on the stack.
+ __ LoadRoot(r4, RootIndex::kTheHoleValue);
+ __ SmiTag(r0);
+ __ Push(cp, r0, r1, r4, r3);
- // Restore constructor function and argument count.
- __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ ldr(r0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
- __ SmiUntag(r0);
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize]: new target
+ // -- sp[1*kPointerSize]: padding
+ // -- r1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
- Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, r0, r5, &stack_overflow);
- __ b(&enough_stack_space);
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r4);
+ __ JumpIfIsInRange(r4, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
- __ bind(&stack_overflow);
- // Restore the context from the frame.
- __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kThrowStackOverflow);
- // Unreachable code.
- __ bkpt(0);
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, r4,
+ r5);
+ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
+ __ b(&post_instantiation_deopt_entry);
- __ bind(&enough_stack_space);
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(r0, RootIndex::kTheHoleValue);
- // Copy arguments to the expression stack.
- __ PushArray(r4, r0, r5);
+ // ----------- S t a t e -------------
+ // -- r0: receiver
+ // -- Slot 3 / sp[0*kPointerSize]: new target
+ // -- Slot 2 / sp[1*kPointerSize]: constructor function
+ // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[3*kPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(r3);
+
+ // Push the allocated receiver to the stack.
+ __ Push(r0);
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+  // receiver. The second copy is pushed after the arguments; it is saved in
+  // r6 because r0 needs to hold the number of arguments before the function
+  // is invoked.
+ __ mov(r6, r0);
+
+ // Set up pointer to first argument (skip receiver).
+ __ add(r4, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
+
+ // Restore constructor function and argument count.
+ __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ ldr(r0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(r0);
-#ifdef V8_REVERSE_JSARGS
- // Push implicit receiver.
- __ Push(r6);
-#endif
+ Label stack_overflow;
+ __ StackOverflowCheck(r0, r5, &stack_overflow);
- // Call the function.
- __ InvokeFunctionWithNewTarget(r1, r3, r0, CALL_FUNCTION);
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+  // should get the formal parameter count and copy the arguments to their
+  // correct positions (including any undefined), instead of delaying this to
+ // InvokeFunction.
- // ----------- S t a t e -------------
- // -- r0: constructor result
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: padding
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments
- // -- sp[4*kPointerSize]: context
- // -----------------------------------
+ // Copy arguments to the expression stack.
+ __ PushArray(r4, r0, r5);
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
+ // Push implicit receiver.
+ __ Push(r6);
- // Restore the context from the frame.
- __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ // Call the function.
+ __ InvokeFunctionWithNewTarget(r1, r3, r0, CALL_FUNCTION);
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, do_throw, leave_frame;
+ // ----------- S t a t e -------------
+ // -- r0: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
- // If the result is undefined, we jump out to using the implicit receiver.
- __ JumpIfRoot(r0, RootIndex::kUndefinedValue, &use_receiver);
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
- // Otherwise we do a smi check and fall through to check if the return value
- // is a valid receiver.
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, leave_and_return, check_receiver;
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(r0, &use_receiver);
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfNotRoot(r0, RootIndex::kUndefinedValue, &check_receiver);
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(r0, r4, r5, FIRST_JS_RECEIVER_TYPE);
- __ b(ge, &leave_frame);
- __ b(&use_receiver);
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
- __ bind(&do_throw);
- __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
+ __ JumpIfRoot(r0, RootIndex::kTheHoleValue, &do_throw);
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
- __ JumpIfRoot(r0, RootIndex::kTheHoleValue, &do_throw);
+ __ bind(&leave_and_return);
+ // Restore smi-tagged arguments count from the frame.
+ __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
+ __ LeaveFrame(StackFrame::CONSTRUCT);
- __ bind(&leave_frame);
- // Restore smi-tagged arguments count from the frame.
- __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
- // Leave construct frame.
- }
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(sp, sp, Operand(kPointerSize));
__ Jump(lr);
+
+ __ bind(&check_receiver);
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(r0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r4, r5, FIRST_JS_RECEIVER_TYPE);
+ __ b(ge, &leave_and_return);
+ __ b(&use_receiver);
+
+ __ bind(&do_throw);
+ // Restore the context from the frame.
+ __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ __ bkpt(0);
+
+ __ bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ bkpt(0);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
@@ -408,16 +362,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
__ cmp(sp, scratch);
__ b(lo, &stack_overflow);
-#ifndef V8_REVERSE_JSARGS
- // Push receiver.
- __ ldr(scratch, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
- __ Push(scratch);
-#endif
-
// ----------- S t a t e -------------
// -- r1 : the JSGeneratorObject to resume
// -- r4 : generator function
@@ -433,7 +381,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ ldr(r2,
FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
{
-#ifdef V8_REVERSE_JSARGS
Label done_loop, loop;
__ mov(r6, r3);
@@ -450,21 +397,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Push receiver.
__ ldr(scratch, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
-#else
- Label done_loop, loop;
- __ mov(r6, Operand(0));
-
- __ bind(&loop);
- __ cmp(r6, r3);
- __ b(ge, &done_loop);
- __ add(scratch, r2, Operand(r6, LSL, kTaggedSizeLog2));
- __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ Push(scratch);
- __ add(r6, r6, Operand(1));
- __ b(&loop);
-
- __ bind(&done_loop);
-#endif
}
// Underlying function needs to have bytecode available.
@@ -767,7 +699,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Clobbers r5.
Label enough_stack_space, stack_overflow;
__ add(r6, r0, Operand(1)); // Add one for receiver.
- Generate_StackOverflowCheck(masm, r6, r5, &stack_overflow);
+ __ StackOverflowCheck(r6, r5, &stack_overflow);
__ b(&enough_stack_space);
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -782,7 +714,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r3: receiver
// r0: argc
// r4: argv, i.e. points to first arg
-#ifdef V8_REVERSE_JSARGS
Label loop, entry;
__ add(r6, r4, Operand(r0, LSL, kSystemPointerSizeLog2));
// r6 points past last arg.
@@ -798,23 +729,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the receiver.
__ Push(r3);
-#else
- // Push the receiver.
- __ Push(r3);
-
- Label loop, entry;
- __ add(r3, r4, Operand(r0, LSL, kSystemPointerSizeLog2));
- // r3 points past last arg.
- __ b(&entry);
- __ bind(&loop);
- __ ldr(r5, MemOperand(r4, kSystemPointerSize,
- PostIndex)); // read next parameter
- __ ldr(r5, MemOperand(r5)); // dereference handle
- __ push(r5); // push parameter
- __ bind(&entry);
- __ cmp(r4, r3);
- __ b(ne, &loop);
-#endif
// Setup new.target and function.
__ mov(r3, r1);
@@ -877,29 +791,43 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
OMIT_SMI_CHECK);
}
-static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
- Register args_count = scratch;
-
- // Get the arguments + receiver count.
- __ ldr(args_count,
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register params_size = scratch1;
+ // Get the size of the formal parameters + receiver (in bytes).
+ __ ldr(params_size,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ ldr(args_count,
- FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+ __ ldr(params_size,
+ FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ Register actual_params_size = scratch2;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ ldr(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ lsl(actual_params_size, actual_params_size, Operand(kPointerSizeLog2));
+ __ add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+
+  // If the actual count is bigger than the formal count, use it when dropping
+  // the stack arguments.
+ __ cmp(params_size, actual_params_size);
+ __ mov(params_size, actual_params_size, LeaveCC, lt);
+#endif
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
- __ add(sp, sp, args_count, LeaveCC);
+ __ add(sp, sp, params_size, LeaveCC);
}
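Under V8_NO_ARGUMENTS_ADAPTOR the code above drops whichever of the formal and actual argument areas is larger, so surplus actual arguments are popped too. A C++ pseudocode sketch of that size computation (illustrative names, not the assembly's registers):

#include <cstddef>

std::size_t BytesToDrop(std::size_t formal_params_bytes,  // includes receiver
                        std::size_t actual_argc, std::size_t pointer_size) {
  std::size_t actual_bytes = actual_argc * pointer_size + pointer_size;
  return formal_params_bytes < actual_bytes ? actual_bytes
                                            : formal_params_bytes;
}
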
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ cmp(smi_entry, Operand(Smi::FromEnum(marker)));
+ __ cmp_raw_immediate(actual_marker, expected_marker);
__ b(ne, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -916,16 +844,21 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(r1, r3, optimized_code_entry, scratch));
Register closure = r1;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code;
__ ldr(scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ ldr(scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ b(ne, &found_deoptimized_code);
+ __ b(ne, &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
@@ -934,10 +867,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadCodeObjectEntry(r2, optimized_code_entry);
__ Jump(r2);
- // Optimized code slot contains deoptimized code, evict it and re-enter
- // the closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+  // The optimized code slot contains deoptimized code, or the code is cleared
+  // and the optimization marker has not been updated. Evict the code, update
+  // the marker, and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
@@ -947,7 +881,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+  // -- optimization_marker : an int32 containing a non-zero optimization
+ // marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker));
@@ -964,12 +899,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
if (FLAG_debug_code) {
- __ cmp(optimization_marker,
- Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
+ __ stop();
}
}
@@ -1099,18 +1033,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE));
__ b(ne, &push_stack_frame);
- Register optimized_code_entry = r4;
+ Register optimization_state = r4;
- // Read off the optimized code slot in the feedback vector.
- __ ldr(optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ // Read off the optimization state in the feedback vector.
+ __ ldr(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
- __ cmp(optimized_code_entry,
- Operand(Smi::FromEnum(OptimizationMarker::kNone)));
- __ b(ne, &optimized_code_slot_not_empty);
+  // Check if the optimized code slot is not empty or has an optimization marker.
+ Label has_optimized_code_or_marker;
+ __ tst(
+ optimization_state,
+ Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ b(ne, &has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
@@ -1156,7 +1090,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ sub(r9, sp, Operand(r4));
- LoadStackLimit(masm, r2, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(r2, StackLimitKind::kRealStackLimit);
__ cmp(r9, Operand(r2));
__ b(lo, &stack_overflow);
@@ -1185,7 +1119,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- LoadStackLimit(masm, r4, StackLimitKind::kInterruptStackLimit);
+ __ LoadStackLimit(r4, StackLimitKind::kInterruptStackLimit);
__ cmp(sp, r4);
__ b(lo, &stack_check_interrupt);
__ bind(&after_stack_check_interrupt);
@@ -1228,7 +1162,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&do_return);
// The return value is in r0.
- LeaveInterpreterFrame(masm, r2);
+ LeaveInterpreterFrame(masm, r2, r4);
__ Jump(lr);
__ bind(&stack_check_interrupt);
@@ -1255,19 +1189,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&optimized_code_slot_not_empty);
+ __ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
- // Check if optimized code marker is actually a weak reference to the
- // optimized code.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+
+ // Check if optimized code is available
+ __ tst(
+ optimization_state,
+ Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ b(eq, &maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = optimization_state;
+ __ ldr(optimization_marker,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
__ bind(&compile_lazy);
@@ -1287,12 +1228,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ mov(scratch, Operand(scratch, LSL, kSystemPointerSizeLog2));
__ sub(start_address, start_address, scratch);
// Push the arguments.
-#ifdef V8_REVERSE_JSARGS
__ PushArray(start_address, num_args, scratch,
TurboAssembler::PushArrayOrder::kReverse);
-#else
- __ PushArray(start_address, num_args, scratch);
-#endif
}
// static
@@ -1309,18 +1246,15 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -----------------------------------
Label stack_overflow;
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ sub(r0, r0, Operand(1));
}
-#endif
__ add(r3, r0, Operand(1)); // Add one for receiver.
- Generate_StackOverflowCheck(masm, r3, r4, &stack_overflow);
+ __ StackOverflowCheck(r3, r4, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Don't copy receiver. Argument count is correct.
__ mov(r3, r0);
@@ -1341,21 +1275,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ sub(r2, r2, Operand(kSystemPointerSize));
__ ldr(r2, MemOperand(r2));
}
-#else
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(RootIndex::kUndefinedValue);
- __ mov(r3, r0); // Argument count is correct.
- }
-
- // Push the arguments. r2 and r4 will be modified.
- Generate_InterpreterPushArgs(masm, r3, r2, r4);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(r2); // Pass the spread in a register
- __ sub(r0, r0, Operand(1)); // Subtract one for spread
- }
-#endif
// Call the target.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
@@ -1388,9 +1307,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ add(r5, r0, Operand(1)); // Add one for receiver.
- Generate_StackOverflowCheck(masm, r5, r6, &stack_overflow);
+ __ StackOverflowCheck(r5, r6, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ sub(r0, r0, Operand(1));
@@ -1412,21 +1330,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(r2, r5);
}
-#else
- // Push a slot for the receiver to be constructed.
- __ mov(r5, Operand::Zero());
- __ push(r5);
-
- // Push the arguments. r4 and r5 will be modified.
- Generate_InterpreterPushArgs(masm, r0, r4, r5);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(r2); // Pass the spread in a register
- __ sub(r0, r0, Operand(1)); // Subtract one for spread
- } else {
- __ AssertUndefinedOrAllocationSite(r2, r5);
- }
-#endif
if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(r1);
@@ -1590,7 +1493,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire(); // Temp register is not allocatable.
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
__ mov(scratch, r0);
} else {
@@ -1602,14 +1504,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
sp, config->num_allocatable_general_registers() * kPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize));
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ str(r0,
- MemOperand(
- sp, config->num_allocatable_general_registers() * kPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize));
-#endif
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1618,7 +1512,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ SmiUntag(Register::from_code(code));
}
}
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin && with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r0 contains the arguments count, the return value
@@ -1628,7 +1521,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Recover arguments count.
__ sub(r0, r0, Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
}
-#endif
__ ldr(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
// Load builtin index (stored as a Smi) and use it to get the builtin start
@@ -1715,9 +1607,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
- // -- sp[0] : argArray
+ // -- sp[0] : receiver
// -- sp[4] : thisArg
- // -- sp[8] : receiver
+ // -- sp[8] : argArray
// -----------------------------------
// 1. Load receiver into r1, argArray into r2 (if present), remove all
@@ -1726,20 +1618,11 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
{
__ LoadRoot(r5, RootIndex::kUndefinedValue);
__ mov(r2, r5);
-#ifdef V8_REVERSE_JSARGS
__ ldr(r1, MemOperand(sp, 0)); // receiver
__ cmp(r0, Operand(1));
__ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg
__ cmp(r0, Operand(2), ge);
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray
-#else
- __ ldr(r1, MemOperand(sp, r0, LSL, kSystemPointerSizeLog2)); // receiver
- __ sub(r4, r0, Operand(1), SetCC);
- __ ldr(r5, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2), ge); // thisArg
- __ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r2, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
- ge); // argArray
-#endif
__ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
__ str(r5, MemOperand(sp, 0));
}
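
With the reversed layout, the receiver now sits at sp[0], thisArg one slot above it and argArray two slots above, and each optional operand is loaded only when argc is large enough, defaulting to undefined. A minimal host-side C++ sketch of that selection logic (a plain pointer array stands in for the stack and nullptr for undefined; the names are illustrative, not V8 API):

struct ApplyOperands {
  const void* receiver;
  const void* this_arg;   // defaults to undefined
  const void* arg_array;  // defaults to undefined
};

ApplyOperands SelectApplyOperands(const void* const* stack, int argc) {
  ApplyOperands ops{nullptr, nullptr, nullptr};
  ops.receiver = stack[0];                  // sp[0]
  if (argc >= 1) ops.this_arg = stack[1];   // sp[1 * kSystemPointerSize]
  if (argc >= 2) ops.arg_array = stack[2];  // sp[2 * kSystemPointerSize]
  return ops;
}
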
@@ -1774,7 +1657,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
-#ifdef V8_REVERSE_JSARGS
// 1. Get the callable to call (passed as receiver) from the stack.
__ Pop(r1);
@@ -1791,45 +1673,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Adjust the actual number of arguments.
__ sub(r0, r0, Operand(1));
-#else
- // 1. Make sure we have at least one argument.
- // r0: actual number of arguments
- {
- Label done;
- __ cmp(r0, Operand::Zero());
- __ b(ne, &done);
- __ PushRoot(RootIndex::kUndefinedValue);
- __ add(r0, r0, Operand(1));
- __ bind(&done);
- }
-
- // 2. Get the callable to call (passed as receiver) from the stack.
- // r0: actual number of arguments
- __ ldr(r1, __ ReceiverOperand(r0));
-
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // r0: actual number of arguments
- // r1: callable
- {
- Register scratch = r3;
- Label loop;
- // Calculate the copy start address (destination). Copy end address is sp.
- __ add(r2, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
-
- __ bind(&loop);
- __ ldr(scratch, MemOperand(r2, -kSystemPointerSize));
- __ str(scratch, MemOperand(r2));
- __ sub(r2, r2, Operand(kSystemPointerSize));
- __ cmp(r2, sp);
- __ b(ne, &loop);
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ sub(r0, r0, Operand(1));
- __ pop();
- }
-#endif
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
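
Under the receiver-first order, Function.prototype.call only needs to pop the callable out of the receiver slot, let the former first argument become the receiver, and decrement argc. A small C++ sketch of that shuffle, assuming the builtin also guarantees at least one argument by pushing undefined when argc is zero (that step is not visible in the hunk above); index 0 of the deque models sp[0]:

#include <deque>

constexpr int kUndefined = 0;  // stand-in for the undefined value

struct CallShuffle {
  int callable;  // the value that ends up in r1
  int argc;      // the adjusted argument count (r0)
};

CallShuffle ShuffleForCall(std::deque<int>* stack, int argc) {
  int callable = stack->front();   // 1. receiver slot holds the callable
  stack->pop_front();
  if (argc == 0) {                 // 2. ensure there is a receiver slot left
    stack->push_front(kUndefined);
    argc = 1;
  }
  return {callable, argc - 1};     // 3. first argument is now the receiver
}
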
@@ -1838,12 +1681,11 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
- // -- sp[0] : argumentsList
- // -- sp[4] : thisArgument
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target (if argc >= 1)
+ // -- sp[8] : thisArgument (if argc >= 2)
+ // -- sp[12] : argumentsList (if argc == 3)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
// 1. Load target into r1 (if present), argumentsList into r2 (if present),
// remove all arguments from the stack (including the receiver), and push
@@ -1852,23 +1694,12 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(r1, RootIndex::kUndefinedValue);
__ mov(r5, r1);
__ mov(r2, r1);
-#ifdef V8_REVERSE_JSARGS
__ cmp(r0, Operand(1));
__ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
__ cmp(r0, Operand(2), ge);
__ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument
__ cmp(r0, Operand(3), ge);
__ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList
-#else
- __ sub(r4, r0, Operand(1), SetCC);
- __ ldr(r1, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2), ge); // target
- __ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r5, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
- ge); // thisArgument
- __ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r2, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
- ge); // argumentsList
-#endif
__ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
__ str(r5, MemOperand(sp, 0));
}
@@ -1891,12 +1722,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
- // -- sp[0] : new.target (optional)
- // -- sp[4] : argumentsList
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target
+ // -- sp[8] : argumentsList
+ // -- sp[12] : new.target (optional)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
// 1. Load target into r1 (if present), argumentsList into r2 (if present),
// new.target into r3 (if present, otherwise use target), remove all
@@ -1905,7 +1735,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
{
__ LoadRoot(r1, RootIndex::kUndefinedValue);
__ mov(r2, r1);
-#ifdef V8_REVERSE_JSARGS
__ mov(r4, r1);
__ cmp(r0, Operand(1));
__ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
@@ -1916,19 +1745,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target
__ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
__ str(r4, MemOperand(sp, 0)); // set undefined to the receiver
-#else
- __ str(r2, MemOperand(sp, r0, LSL, kSystemPointerSizeLog2)); // receiver
- __ sub(r4, r0, Operand(1), SetCC);
- __ ldr(r1, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2), ge); // target
- __ mov(r3, r1); // new.target defaults to target
- __ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r2, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
- ge); // argumentsList
- __ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r3, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
- ge); // new.target
- __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
-#endif
}
// ----------- S t a t e -------------
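
Reflect.construct follows the same conditional-load pattern, with the extra twist that new.target falls back to target when only two arguments are supplied. A sketch under the same conventions as above (pointer array for the stack, nullptr for undefined, illustrative names):

struct ConstructOperands {
  const void* target;
  const void* arguments_list;
  const void* new_target;
};

ConstructOperands SelectConstructOperands(const void* const* stack, int argc) {
  ConstructOperands ops{nullptr, nullptr, nullptr};
  if (argc >= 1) ops.target = stack[1];          // sp[1 * kSystemPointerSize]
  if (argc >= 2) ops.arguments_list = stack[2];  // sp[2 * kSystemPointerSize]
  // new.target defaults to target unless a third argument is present.
  ops.new_target = (argc >= 3) ? stack[3] : ops.target;
  return ops;
}
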
@@ -2006,9 +1822,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
}
Label stack_overflow;
- Generate_StackOverflowCheck(masm, r4, scratch, &stack_overflow);
+ __ StackOverflowCheck(r4, scratch, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Move the arguments already in the stack,
// including the receiver and the return address.
{
@@ -2028,7 +1843,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bind(&check);
__ b(ge, &copy);
}
-#endif
// Copy arguments onto the stack (thisArgument is already on the stack).
{
@@ -2043,11 +1857,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ cmp(scratch, r5);
// Turn the hole into undefined as we go.
__ LoadRoot(scratch, RootIndex::kUndefinedValue, eq);
-#ifdef V8_REVERSE_JSARGS
__ str(scratch, MemOperand(r9, kSystemPointerSize, PostIndex));
-#else
- __ Push(scratch);
-#endif
__ add(r6, r6, Operand(1));
__ b(&loop);
__ bind(&done);
@@ -2092,6 +1902,12 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&new_target_constructor);
}
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
+ // code is erased.
+ __ mov(r4, fp);
+ __ ldr(r5, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+#else
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2115,6 +1931,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ SmiUntag(r5);
}
__ bind(&arguments_done);
+#endif
Label stack_done, stack_overflow;
__ sub(r5, r5, r2, SetCC);
@@ -2131,10 +1948,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -----------------------------------
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, r5, scratch, &stack_overflow);
+ __ StackOverflowCheck(r5, scratch, &stack_overflow);
// Forward the arguments from the caller frame.
-#ifdef V8_REVERSE_JSARGS
// Point to the first argument to copy (skipping the receiver).
__ add(r4, r4,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
@@ -2161,26 +1977,17 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&check);
__ b(ge, &copy);
}
-#endif
// Copy arguments from the caller frame.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
Label loop;
-#ifndef V8_REVERSE_JSARGS
- // Skips frame pointer.
- __ add(r4, r4, Operand(CommonFrameConstants::kFixedFrameSizeAboveFp));
-#endif
__ add(r0, r0, r5);
__ bind(&loop);
{
__ sub(r5, r5, Operand(1), SetCC);
__ ldr(scratch, MemOperand(r4, r5, LSL, kSystemPointerSizeLog2));
-#ifdef V8_REVERSE_JSARGS
__ str(scratch, MemOperand(r2, r5, LSL, kSystemPointerSizeLog2));
-#else
- __ push(scratch);
-#endif
__ b(ne, &loop);
}
}
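
The V8_NO_ARGUMENTS_ADAPTOR branch above replaces the old frame walk with a single load of the argument count stored in the caller's frame. A sketch of the two paths, with an illustrative frame struct in place of the real StandardFrameConstants/ArgumentsAdaptorFrameConstants offsets (the legacy fallback to the formal parameter count is summarized here, not shown in the hunk):

struct CallerFrame {
  int argc_slot;            // fp[kArgCOffset], present when V8_NO_ARGUMENTS_ADAPTOR
  bool has_adaptor_frame;   // legacy path: adaptor frame below the function frame
  int adaptor_argc;         // argument count recorded in that adaptor frame
  int formal_param_count;   // legacy fallback when there is no adaptor frame
};

int ForwardedArgumentCount(const CallerFrame& f, bool no_arguments_adaptor) {
  if (no_arguments_adaptor) {
    return f.argc_slot;     // one load, no frame walking
  }
  return f.has_adaptor_frame ? f.adaptor_argc : f.formal_param_count;
}
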
@@ -2334,8 +2141,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Compute the space we have left. The stack might already be overflowed
// here which will cause remaining_stack_size to become negative.
- LoadStackLimit(masm, remaining_stack_size,
- StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(remaining_stack_size,
+ StackLimitKind::kRealStackLimit);
__ sub(remaining_stack_size, sp, remaining_stack_size);
// Check if the arguments will overflow the stack.
@@ -2350,7 +2157,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Pop receiver.
__ Pop(r5);
@@ -2368,39 +2174,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Push receiver.
__ Push(r5);
-#else
- // Reserve stack space for the [[BoundArguments]].
- __ AllocateStackSpace(scratch);
-
- // Relocate arguments down the stack.
- {
- Label loop, done_loop;
- __ mov(r5, Operand(0));
- __ bind(&loop);
- __ cmp(r5, r0);
- __ b(gt, &done_loop);
- __ ldr(scratch, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2));
- __ str(scratch, MemOperand(sp, r5, LSL, kSystemPointerSizeLog2));
- __ add(r4, r4, Operand(1));
- __ add(r5, r5, Operand(1));
- __ b(&loop);
- __ bind(&done_loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop;
- __ ldr(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ SmiUntag(r4);
- __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- __ sub(r4, r4, Operand(1), SetCC);
- __ ldr(scratch, MemOperand(r2, r4, LSL, kPointerSizeLog2));
- __ str(scratch, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ add(r0, r0, Operand(1));
- __ b(gt, &loop);
- }
-#endif
}
__ bind(&no_bound_arguments);
}
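
With arguments stored receiver-first, splicing in [[BoundArguments]] reduces to: pop the receiver, push the bound arguments, push the receiver back on top. A sketch where the back of a std::vector models the top of the stack (the push order is chosen so bound[0] ends up adjacent to the receiver, which is what the reversed layout expects):

#include <vector>

void PushBoundArguments(std::vector<int>* stack, const std::vector<int>& bound) {
  int receiver = stack->back();            // Pop receiver.
  stack->pop_back();
  for (int i = static_cast<int>(bound.size()) - 1; i >= 0; --i) {
    stack->push_back(bound[i]);            // Push [[BoundArguments]].
  }
  stack->push_back(receiver);              // Push receiver back on top.
}
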
@@ -2588,19 +2361,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r3 : new target (passed through to callee)
// -----------------------------------
- Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
+ Label dont_adapt_arguments, stack_overflow;
__ cmp(r2, Operand(kDontAdaptArgumentsSentinel));
__ b(eq, &dont_adapt_arguments);
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
-#ifndef V8_REVERSE_JSARGS
- // This optimization is disabled when the arguments are reversed.
- __ tst(r4,
- Operand(SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask));
- __ b(ne, &skip_adapt_arguments);
-#endif
-
// -------------------------------------------
// Adapt arguments.
// -------------------------------------------
@@ -2613,18 +2379,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&over_application);
{
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
+ __ StackOverflowCheck(r2, r5, &stack_overflow);
// Calculate copy start address into r0 and copy end address into r4.
// r0: actual number of arguments as a smi
// r1: function
// r2: expected number of arguments
// r3: new target (passed through to callee)
-#ifdef V8_REVERSE_JSARGS
__ add(r0, fp, Operand(r2, LSL, kSystemPointerSizeLog2));
-#else
- __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
-#endif
// adjust for return address and receiver
__ add(r0, r0, Operand(2 * kSystemPointerSize));
__ sub(r4, r0, Operand(r2, LSL, kSystemPointerSizeLog2));
@@ -2651,9 +2413,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&under_application);
{
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
+ __ StackOverflowCheck(r2, r5, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Fill the remaining expected arguments with undefined.
// r0: actual number of arguments as a smi
// r1: function
@@ -2695,47 +2456,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ cmp(r0, fp); // Compare before moving to next argument.
__ sub(r0, r0, Operand(kPointerSize));
__ b(ne, &copy);
-#else
- // Calculate copy start address into r0 and copy end address is fp.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r0: copy start address
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- Label copy;
- __ bind(&copy);
-
- // Adjust load for return address and receiver.
- __ ldr(r5, MemOperand(r0, 2 * kPointerSize));
- __ push(r5);
-
- __ cmp(r0, fp); // Compare before moving to next argument.
- __ sub(r0, r0, Operand(kPointerSize));
- __ b(ne, &copy);
-
- // Fill the remaining expected arguments with undefined.
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- __ LoadRoot(r5, RootIndex::kUndefinedValue);
- __ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
- // Adjust for frame.
- __ sub(r4, r4,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ push(r5);
- __ cmp(sp, r4);
- __ b(ne, &fill);
-#endif
}
// Call the entry point.
@@ -2758,41 +2478,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
// -------------------------------------------
- // Skip adapt arguments.
- // -------------------------------------------
- __ bind(&skip_adapt_arguments);
- {
- // The callee cannot observe the actual arguments, so it's safe to just
- // pass the expected arguments by massaging the stack appropriately. See
- // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
- Label under_application, over_application;
- __ cmp(r0, r2);
- __ b(lt, &under_application);
-
- __ bind(&over_application);
- {
- // Remove superfluous parameters from the stack.
- __ sub(r4, r0, r2);
- __ mov(r0, r2);
- __ add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
- __ b(&dont_adapt_arguments);
- }
-
- __ bind(&under_application);
- {
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(r4, RootIndex::kUndefinedValue);
- __ bind(&fill);
- __ add(r0, r0, Operand(1));
- __ push(r4);
- __ cmp(r0, r2);
- __ b(lt, &fill);
- __ b(&dont_adapt_arguments);
- }
- }
-
- // -------------------------------------------
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
@@ -3241,12 +2926,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- r2 : arguments count (not including the receiver)
// -- r3 : call data
// -- r0 : holder
- // -- sp[0] : last argument
+ // -- sp[0] : receiver
+ // -- sp[4] : first argument
// -- ...
- // -- sp[(argc - 1) * 4] : first argument
- // -- sp[(argc + 0) * 4] : receiver
+ // -- sp[(argc) * 4] : last argument
// -----------------------------------
- // NOTE: The order of args are reversed if V8_REVERSE_JSARGS
Register api_function_address = r1;
Register argc = r2;
@@ -3314,12 +2998,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ add(scratch, scratch, Operand((FCA::kArgsLength + 1) * kPointerSize));
-#else
- __ add(scratch, scratch, Operand((FCA::kArgsLength - 1) * kPointerSize));
- __ add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2));
-#endif
__ str(scratch, MemOperand(sp, 2 * kPointerSize));
// FunctionCallbackInfo::length_.
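
With the receiver now below the arguments, FunctionCallbackInfo::values_ no longer depends on argc: it is a fixed offset past the implicit FunctionCallbackArguments slots plus the receiver. A sketch of that computation; kPointerSize = 4 matches 32-bit ARM, while the FCA::kArgsLength value below is an assumption for illustration:

#include <cstdint>

constexpr int kPointerSize = 4;    // 32-bit ARM
constexpr int kFCAArgsLength = 6;  // assumed stand-in for FCA::kArgsLength

uintptr_t ValuesPointer(uintptr_t implicit_args_base) {
  // values_ = implicit_args + (kArgsLength + 1) * kPointerSize;
  // the +1 skips the receiver that sits between the implicit slots and the
  // first JS argument.
  return implicit_args_base + (kFCAArgsLength + 1) * kPointerSize;
}
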
@@ -3461,6 +3140,251 @@ void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
__ Ret();
}
+namespace {
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Note: This is an overapproximation; we always reserve space for 32 double
+ // registers, even though the actual CPU may only support 16. In the latter
+ // case, SaveFPRegs and RestoreFPRegs still use 32 stack slots, but only fill
+ // 16.
+ static constexpr int kDoubleRegsSize =
+ kDoubleSize * DwVfpRegister::kNumRegisters;
+
+ // Save all allocatable VFP registers before messing with them.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ SaveFPRegs(sp, scratch);
+ }
+
+ // Save all general purpose registers before messing with them.
+ static constexpr int kNumberOfRegisters = Register::kNumRegisters;
+ STATIC_ASSERT(kNumberOfRegisters == 16);
+
+ // Everything but pc, lr and ip which will be saved but not restored.
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
+
+ // Push all 16 registers (needed to populate FrameDescription::registers_).
+ // TODO(v8:1588): Note that using pc with stm is deprecated, so we should
+ // perhaps handle this a bit differently.
+ __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ Move(scratch, ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, isolate));
+ __ str(fp, MemOperand(scratch));
+ }
+
+ static constexpr int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+ __ mov(r2, Operand(Deoptimizer::kFixedExitSizeMarker));
+ // Get the address of the location in the code object (r3) (return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register r4.
+ __ mov(r3, lr);
+ __ add(r4, sp, Operand(kSavedRegistersAreaSize));
+ __ sub(r4, fp, r4);
+
+ // Allocate a new deoptimizer object.
+ // Pass four arguments in r0 to r3 and fifth argument on stack.
+ __ PrepareCallCFunction(6);
+ __ mov(r0, Operand(0));
+ Label context_check;
+ __ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(r1, &context_check);
+ __ ldr(r0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ mov(r1, Operand(static_cast<int>(deopt_kind)));
+ // r2: bailout id already loaded.
+ // r3: code address or 0 already loaded.
+ __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
+ __ Move(r5, ExternalReference::isolate_address(isolate));
+ __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve "deoptimizer" object in register r0 and get the input
+ // frame descriptor pointer to r1 (deoptimizer->input_);
+ __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_.
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ ldr(r2, MemOperand(sp, i * kPointerSize));
+ __ str(r2, MemOperand(r1, offset));
+ }
+
+ // Copy double registers to double_registers_.
+ static constexpr int kDoubleRegsOffset =
+ FrameDescription::double_registers_offset();
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ Register src_location = r4;
+ __ add(src_location, sp, Operand(kNumberOfRegisters * kPointerSize));
+ __ RestoreFPRegs(src_location, scratch);
+
+ Register dst_location = r4;
+ __ add(dst_location, r1, Operand(kDoubleRegsOffset));
+ __ SaveFPRegsToHeap(dst_location, scratch);
+ }
+
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register zero = r4;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ mov(zero, Operand(0));
+ __ strb(zero, MemOperand(is_iterable));
+ }
+
+ // Remove the saved registers from the stack.
+ __ add(sp, sp, Operand(kSavedRegistersAreaSize));
+
+ // Compute a pointer to the unwinding limit in register r2; that is
+ // the first stack slot not part of the input frame.
+ __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
+ __ add(r2, r2, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ b(&pop_loop_header);
+ __ bind(&pop_loop);
+ __ pop(r4);
+ __ str(r4, MemOperand(r3, 0));
+ __ add(r3, r3, Operand(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
+ __ cmp(r2, sp);
+ __ b(ne, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(r0); // Preserve deoptimizer object across call.
+ // r0: deoptimizer object; r1: scratch.
+ __ PrepareCallCFunction(1);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ pop(r0); // Restore deoptimizer object (class Deoptimizer).
+
+ __ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: r4 = current "FrameDescription** output_",
+ // r1 = one past the last FrameDescription**.
+ __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
+ __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_.
+ __ add(r1, r4, Operand(r1, LSL, 2));
+ __ jmp(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
+ __ ldr(r2, MemOperand(r4, 0)); // output_[ix]
+ __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ sub(r3, r3, Operand(sizeof(uint32_t)));
+ __ add(r6, r2, Operand(r3));
+ __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
+ __ push(r6);
+ __ bind(&inner_loop_header);
+ __ cmp(r3, Operand::Zero());
+ __ b(ne, &inner_push_loop); // test for gt?
+ __ add(r4, r4, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ cmp(r4, r1);
+ __ b(lt, &outer_push_loop);
+
+ __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
+
+ // State:
+ // r1: Deoptimizer::input_ (FrameDescription*).
+ // r2: The last output FrameDescription pointer (FrameDescription*).
+
+ // Restore double registers from the input frame description.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ Register src_location = r6;
+ __ add(src_location, r1, Operand(kDoubleRegsOffset));
+ __ RestoreFPRegsFromHeap(src_location, scratch);
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
+ __ push(r6);
+ __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
+ __ push(r6);
+
+ // Push the registers from the last output frame.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ ldr(r6, MemOperand(r2, offset));
+ __ push(r6);
+ }
+
+ // Restore the registers from the stack.
+ __ ldm(ia_w, sp, restored_regs); // all but pc registers.
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register one = r4;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ mov(one, Operand(1));
+ __ strb(one, MemOperand(is_iterable));
+ }
+
+ // Remove sp, lr and pc.
+ __ Drop(3);
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ pop(scratch); // get continuation, leave pc on stack
+ __ pop(lr);
+ __ Jump(scratch);
+ }
+
+ __ stop();
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
#undef __
} // namespace internal
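
The new Generate_DeoptimizationEntry above ports the deoptimizer entry stub into the builtins file. A compressed host-side sketch of the control flow it emits: capture the register/frame state, hand it to a Deoptimizer, let it compute the unoptimized output frames, and then rebuild the stack from those frames. All types below are simplified stand-ins, not the real V8 classes:

#include <cstdint>
#include <vector>

struct FrameDescription {
  std::vector<uintptr_t> contents;
};

struct Deoptimizer {
  FrameDescription input;
  std::vector<FrameDescription> outputs;
  // Placeholder: the real ComputeOutputFrames translates the optimized frame
  // into one or more unoptimized (interpreter) frames.
  void ComputeOutputFrames() { outputs.push_back(input); }
};

void DeoptimizationEntry(std::vector<uintptr_t>* stack, int deopt_kind) {
  Deoptimizer deopt;
  // 1. The saved registers and the fp-to-sp delta would be recorded here.
  // 2. Copy the current (input) frame into the input FrameDescription.
  deopt.input.contents = *stack;
  // 3. Ask the deoptimizer to translate it into output frames.
  deopt.ComputeOutputFrames();
  // 4. Replace the stack contents with the materialized output frames.
  stack->clear();
  for (const FrameDescription& out : deopt.outputs) {
    stack->insert(stack->end(), out.contents.begin(), out.contents.end());
  }
  // 5. Execution then resumes at the continuation of the last output frame.
  (void)deopt_kind;
}
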
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index be6d70eb08..92c1fefa0a 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -74,41 +74,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
-
-void LoadStackLimit(MacroAssembler* masm, Register destination,
- StackLimitKind kind) {
- DCHECK(masm->root_array_available());
- Isolate* isolate = masm->isolate();
- ExternalReference limit =
- kind == StackLimitKind::kRealStackLimit
- ? ExternalReference::address_of_real_jslimit(isolate)
- : ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
-
- intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
- __ Ldr(destination, MemOperand(kRootRegister, offset));
-}
-
-void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Label* stack_overflow) {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.AcquireX();
-
- // Check the stack for overflow.
- // We are not trying to catch interruptions (e.g. debug break and
- // preemption) here, so the "real stack limit" is checked.
-
- LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ Sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ Cmp(scratch, Operand(num_args, LSL, kSystemPointerSizeLog2));
- __ B(le, stack_overflow);
-}
-
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@@ -122,7 +87,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
Label stack_overflow;
- Generate_StackOverflowCheck(masm, x0, &stack_overflow);
+ __ StackOverflowCheck(x0, &stack_overflow);
// Enter a construct frame.
{
@@ -155,32 +120,28 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// stack to which arguments will be later copied.
__ SlotAddress(x2, argc);
-#ifndef V8_REVERSE_JSARGS
- // Poke the hole (receiver) in the highest slot.
- __ Str(x4, MemOperand(x2));
-#endif
-
// Store padding, if needed.
__ Tbnz(slot_count_without_rounding, 0, &already_aligned);
__ Str(padreg, MemOperand(x2, 1 * kSystemPointerSize));
__ Bind(&already_aligned);
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments in their
+ // correct positions (including any undefined), instead of delaying this to
+ // InvokeFunction.
+
// Copy arguments to the expression stack.
{
Register count = x2;
Register dst = x10;
Register src = x11;
__ SlotAddress(dst, 0);
-#ifdef V8_REVERSE_JSARGS
// Poke the hole (receiver).
__ Str(x4, MemOperand(dst));
__ Add(dst, dst, kSystemPointerSize); // Skip receiver.
__ Add(src, fp,
StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize); // Skip receiver.
-#else
- __ Add(src, fp, StandardFrameConstants::kCallerSPOffset);
-#endif
__ Mov(count, argc);
__ CopyDoubleWords(dst, src, count);
}
@@ -190,24 +151,25 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- x1: constructor function
// -- x3: new target
// If argc is odd:
- // -- sp[0*kSystemPointerSize]: argument n - 1
+ // -- sp[0*kSystemPointerSize]: the hole (receiver)
+ // -- sp[1*kSystemPointerSize]: argument 1
// -- ...
- // -- sp[(n-1)*kSystemPointerSize]: argument 1
- // -- sp[(n+0)*kSystemPointerSize]: the hole (receiver)
+ // -- sp[(n-1)*kSystemPointerSize]: argument (n - 1)
+ // -- sp[(n+0)*kSystemPointerSize]: argument n
// -- sp[(n+1)*kSystemPointerSize]: padding
// -- sp[(n+2)*kSystemPointerSize]: padding
// -- sp[(n+3)*kSystemPointerSize]: number of arguments (tagged)
// -- sp[(n+4)*kSystemPointerSize]: context (pushed by FrameScope)
// If argc is even:
- // -- sp[0*kSystemPointerSize]: argument n - 1
+ // -- sp[0*kSystemPointerSize]: the hole (receiver)
+ // -- sp[1*kSystemPointerSize]: argument 1
// -- ...
- // -- sp[(n-1)*kSystemPointerSize]: argument 1
- // -- sp[(n+0)*kSystemPointerSize]: the hole (receiver)
+ // -- sp[(n-1)*kSystemPointerSize]: argument (n - 1)
+ // -- sp[(n+0)*kSystemPointerSize]: argument n
// -- sp[(n+1)*kSystemPointerSize]: padding
// -- sp[(n+2)*kSystemPointerSize]: number of arguments (tagged)
// -- sp[(n+3)*kSystemPointerSize]: context (pushed by FrameScope)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
// Call the function.
__ InvokeFunctionWithNewTarget(x1, x3, argc, CALL_FUNCTION);
@@ -248,194 +210,192 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_JSConstructStubGeneric");
+ FrameScope scope(masm, StackFrame::MANUAL);
// Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
- Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+ __ EnterFrame(StackFrame::CONSTRUCT);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
- if (__ emit_debug_code()) {
- // Check that FrameScope pushed the context on to the stack already.
- __ Peek(x2, 0);
- __ Cmp(x2, cp);
- __ Check(eq, AbortReason::kUnexpectedValue);
- }
-
- // Preserve the incoming parameters on the stack.
- __ SmiTag(x0);
- __ Push(x0, x1, padreg, x3);
-
- // ----------- S t a t e -------------
- // -- sp[0*kSystemPointerSize]: new target
- // -- sp[1*kSystemPointerSize]: padding
- // -- x1 and sp[2*kSystemPointerSize]: constructor function
- // -- sp[3*kSystemPointerSize]: number of arguments (tagged)
- // -- sp[4*kSystemPointerSize]: context (pushed by FrameScope)
- // -----------------------------------
-
- __ LoadTaggedPointerField(
- x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
- __ DecodeField<SharedFunctionInfo::FunctionKindBits>(w4);
- __ JumpIfIsInRange(w4, kDefaultDerivedConstructor, kDerivedConstructor,
- &not_create_implicit_receiver);
-
- // If not derived class constructor: Allocate the new receiver object.
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
- x4, x5);
-
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
+ if (__ emit_debug_code()) {
+ // Check that FrameScope pushed the context on to the stack already.
+ __ Peek(x2, 0);
+ __ Cmp(x2, cp);
+ __ Check(eq, AbortReason::kUnexpectedValue);
+ }
- __ B(&post_instantiation_deopt_entry);
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(x0);
+ __ Push(x0, x1, padreg, x3);
- // Else: use TheHoleValue as receiver for constructor call
- __ Bind(&not_create_implicit_receiver);
- __ LoadRoot(x0, RootIndex::kTheHoleValue);
+ // ----------- S t a t e -------------
+ // -- sp[0*kSystemPointerSize]: new target
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- x1 and sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[4*kSystemPointerSize]: context (pushed by FrameScope)
+ // -----------------------------------
- // ----------- S t a t e -------------
- // -- x0: receiver
- // -- Slot 4 / sp[0*kSystemPointerSize]: new target
- // -- Slot 3 / sp[1*kSystemPointerSize]: padding
- // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
- // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[4*kSystemPointerSize]: context
- // -----------------------------------
- // Deoptimizer enters here.
- masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
- masm->pc_offset());
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(w4);
+ __ JumpIfIsInRange(w4, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
- __ Bind(&post_instantiation_deopt_entry);
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, x4,
+ x5);
- // Restore new target from the top of the stack.
- __ Peek(x3, 0 * kSystemPointerSize);
+ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
- // Restore constructor function and argument count.
- __ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ SmiUntag(x12, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ B(&post_instantiation_deopt_entry);
- // Copy arguments to the expression stack. The called function pops the
- // receiver along with its arguments, so we need an extra receiver on the
- // stack, in case we have to return it later.
+ // Else: use TheHoleValue as receiver for constructor call
+ __ Bind(&not_create_implicit_receiver);
+ __ LoadRoot(x0, RootIndex::kTheHoleValue);
- // Overwrite the new target with a receiver.
- __ Poke(x0, 0);
+ // ----------- S t a t e -------------
+ // -- x0: receiver
+ // -- Slot 4 / sp[0*kSystemPointerSize]: new target
+ // -- Slot 3 / sp[1*kSystemPointerSize]: padding
+ // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kSystemPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
- // Push two further copies of the receiver. One will be popped by the called
- // function. The second acts as padding if the number of arguments plus
- // receiver is odd - pushing receiver twice avoids branching. It also means
- // that we don't have to handle the even and odd cases specially on
- // InvokeFunction's return, as top of stack will be the receiver in either
- // case.
- __ Push(x0, x0);
+ __ Bind(&post_instantiation_deopt_entry);
- // ----------- S t a t e -------------
- // -- x3: new target
- // -- x12: number of arguments (untagged)
- // -- sp[0*kSystemPointerSize]: implicit receiver (overwrite if argc
- // odd)
- // -- sp[1*kSystemPointerSize]: implicit receiver
- // -- sp[2*kSystemPointerSize]: implicit receiver
- // -- sp[3*kSystemPointerSize]: padding
- // -- x1 and sp[4*kSystemPointerSize]: constructor function
- // -- sp[5*kSystemPointerSize]: number of arguments (tagged)
- // -- sp[6*kSystemPointerSize]: context
- // -----------------------------------
+ // Restore new target from the top of the stack.
+ __ Peek(x3, 0 * kSystemPointerSize);
- // Round the number of arguments down to the next even number, and claim
- // slots for the arguments. If the number of arguments was odd, the last
- // argument will overwrite one of the receivers pushed above.
- __ Bic(x10, x12, 1);
+ // Restore constructor function and argument count.
+ __ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ SmiUntag(x12, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
- // Check if we have enough stack space to push all arguments.
- Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, x10, &stack_overflow);
- __ B(&enough_stack_space);
+ // Copy arguments to the expression stack. The called function pops the
+ // receiver along with its arguments, so we need an extra receiver on the
+ // stack, in case we have to return it later.
- __ Bind(&stack_overflow);
- // Restore the context from the frame.
- __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ Unreachable();
+ // Overwrite the new target with a receiver.
+ __ Poke(x0, 0);
- __ Bind(&enough_stack_space);
- __ Claim(x10);
+ // Push two further copies of the receiver. One will be popped by the called
+ // function. The second acts as padding if the number of arguments plus
+ // receiver is odd - pushing receiver twice avoids branching. It also means
+ // that we don't have to handle the even and odd cases specially on
+ // InvokeFunction's return, as top of stack will be the receiver in either
+ // case.
+ __ Push(x0, x0);
- // Copy the arguments.
- {
- Register count = x2;
- Register dst = x10;
- Register src = x11;
- __ Mov(count, x12);
-#ifdef V8_REVERSE_JSARGS
- __ Poke(x0, 0); // Add the receiver.
- __ SlotAddress(dst, 1); // Skip receiver.
- __ Add(src, fp,
- StandardFrameConstants::kCallerSPOffset + kSystemPointerSize);
-#else
- __ SlotAddress(dst, 0);
- __ Add(src, fp, StandardFrameConstants::kCallerSPOffset);
-#endif
- __ CopyDoubleWords(dst, src, count);
- }
+ // ----------- S t a t e -------------
+ // -- x3: new target
+ // -- x12: number of arguments (untagged)
+ // -- sp[0*kSystemPointerSize]: implicit receiver (overwrite if argc
+ // odd)
+ // -- sp[1*kSystemPointerSize]: implicit receiver
+ // -- sp[2*kSystemPointerSize]: implicit receiver
+ // -- sp[3*kSystemPointerSize]: padding
+ // -- x1 and sp[4*kSystemPointerSize]: constructor function
+ // -- sp[5*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[6*kSystemPointerSize]: context
+ // -----------------------------------
- // Call the function.
- __ Mov(x0, x12);
- __ InvokeFunctionWithNewTarget(x1, x3, x0, CALL_FUNCTION);
+ // Round the number of arguments down to the next even number, and claim
+ // slots for the arguments. If the number of arguments was odd, the last
+ // argument will overwrite one of the receivers pushed above.
+ __ Bic(x10, x12, 1);
- // ----------- S t a t e -------------
- // -- sp[0*kSystemPointerSize]: implicit receiver
- // -- sp[1*kSystemPointerSize]: padding
- // -- sp[2*kSystemPointerSize]: constructor function
- // -- sp[3*kSystemPointerSize]: number of arguments
- // -- sp[4*kSystemPointerSize]: context
- // -----------------------------------
+ // Check if we have enough stack space to push all arguments.
+ Label stack_overflow;
+ __ StackOverflowCheck(x10, &stack_overflow);
+ __ Claim(x10);
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments in their
+ // correct positions (including any undefined), instead of delaying this to
+ // InvokeFunction.
- // Restore the context from the frame.
- __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ // Copy the arguments.
+ {
+ Register count = x2;
+ Register dst = x10;
+ Register src = x11;
+ __ Mov(count, x12);
+ __ Poke(x0, 0); // Add the receiver.
+ __ SlotAddress(dst, 1); // Skip receiver.
+ __ Add(src, fp,
+ StandardFrameConstants::kCallerSPOffset + kSystemPointerSize);
+ __ CopyDoubleWords(dst, src, count);
+ }
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, do_throw, leave_frame;
+ // Call the function.
+ __ Mov(x0, x12);
+ __ InvokeFunctionWithNewTarget(x1, x3, x0, CALL_FUNCTION);
- // If the result is undefined, we jump out to using the implicit receiver.
- __ CompareRoot(x0, RootIndex::kUndefinedValue);
- __ B(eq, &use_receiver);
+ // ----------- S t a t e -------------
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments
+ // -- sp[4*kSystemPointerSize]: context
+ // -----------------------------------
- // Otherwise we do a smi check and fall through to check if the return value
- // is a valid receiver.
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, leave_and_return, check_receiver;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ CompareRoot(x0, RootIndex::kUndefinedValue);
+ __ B(ne, &check_receiver);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ Bind(&use_receiver);
+ __ Peek(x0, 0 * kSystemPointerSize);
+ __ CompareRoot(x0, RootIndex::kTheHoleValue);
+ __ B(eq, &do_throw);
+
+ __ Bind(&leave_and_return);
+ // Restore smi-tagged arguments count from the frame.
+ __ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
+ __ LeaveFrame(StackFrame::CONSTRUCT);
+ // Remove caller arguments from the stack and return.
+ __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
+ __ Ret();
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(x0, &use_receiver);
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+ __ bind(&check_receiver);
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ JumpIfObjectType(x0, x4, x5, FIRST_JS_RECEIVER_TYPE, &leave_frame, ge);
- __ B(&use_receiver);
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(x0, &use_receiver);
- __ Bind(&do_throw);
- __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(x0, x4, x5, FIRST_JS_RECEIVER_TYPE, &leave_and_return,
+ ge);
+ __ B(&use_receiver);
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ Bind(&use_receiver);
- __ Peek(x0, 0 * kSystemPointerSize);
- __ CompareRoot(x0, RootIndex::kTheHoleValue);
- __ B(eq, &do_throw);
+ __ Bind(&do_throw);
+ // Restore the context from the frame.
+ __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ __ Unreachable();
- __ Bind(&leave_frame);
- // Restore smi-tagged arguments count from the frame.
- __ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
- // Leave construct frame.
- }
- // Remove caller arguments from the stack and return.
- __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
- __ Ret();
+ __ Bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ Unreachable();
}
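
The reflowed construct stub above keeps the ECMA-262 13.2.2-7 rule intact: an object result from the constructor replaces the implicit receiver, anything else falls back to the receiver, and a hole receiver (a derived constructor returning a non-object) throws. A sketch of that decision with simplified stand-in types:

struct Value {
  bool is_undefined;
  bool is_smi;
  bool is_js_receiver;
  bool is_the_hole;
};

// Returns the value the construct stub hands back, or nullptr if it throws.
const Value* ConstructStubResult(const Value* result, const Value* receiver,
                                 bool* should_throw) {
  *should_throw = false;
  if (!result->is_undefined && !result->is_smi && result->is_js_receiver) {
    return result;                 // leave_and_return with the returned object
  }
  if (receiver->is_the_hole) {     // use_receiver found TheHole
    *should_throw = true;          // ThrowConstructorReturnedNonObject
    return nullptr;
  }
  return receiver;                 // use the on-stack implicit receiver
}
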
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
@@ -501,7 +461,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadStackLimit(masm, x10, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(x10, StackLimitKind::kRealStackLimit);
__ Cmp(sp, x10);
__ B(lo, &stack_overflow);
@@ -541,7 +501,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
Label loop, done;
__ Cbz(x10, &done);
-#ifdef V8_REVERSE_JSARGS
__ SlotAddress(x12, x10);
__ Add(x5, x5, Operand(x10, LSL, kTaggedSizeLog2));
__ Add(x5, x5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -549,15 +508,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Sub(x10, x10, 1);
__ LoadAnyTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
__ Str(x11, MemOperand(x12, -kSystemPointerSize, PostIndex));
-#else
- __ Mov(x12, 0);
- __ Bind(&loop);
- __ Sub(x10, x10, 1);
- __ Add(x11, x5, Operand(x12, LSL, kTaggedSizeLog2));
- __ LoadAnyTaggedField(x11, FieldMemOperand(x11, FixedArray::kHeaderSize));
- __ Poke(x11, Operand(x10, LSL, kSystemPointerSizeLog2));
- __ Add(x12, x12, 1);
-#endif
__ Cbnz(x10, &loop);
__ Bind(&done);
}
@@ -882,7 +832,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, slots_to_claim, &stack_overflow);
+ __ StackOverflowCheck(slots_to_claim, &stack_overflow);
__ B(&enough_stack_space);
__ Bind(&stack_overflow);
@@ -896,17 +846,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ SlotAddress(scratch, slots_to_claim);
__ Str(padreg, MemOperand(scratch, -kSystemPointerSize));
-#ifdef V8_REVERSE_JSARGS
// Store receiver on the stack.
__ Poke(receiver, 0);
// Store function on the stack.
__ SlotAddress(scratch, argc);
__ Str(function, MemOperand(scratch, kSystemPointerSize));
-#else
- // Store receiver and function on the stack.
- __ SlotAddress(scratch, argc);
- __ Stp(receiver, function, MemOperand(scratch));
-#endif
// Copy arguments to the stack in a loop, in reverse order.
// x4: argc.
@@ -918,7 +862,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// scratch has been set to point to the location of the function, which
// marks the end of the argument copy.
-#ifdef V8_REVERSE_JSARGS
__ SlotAddress(x0, 1); // Skips receiver.
__ Bind(&loop);
// Load the handle.
@@ -930,18 +873,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Loop if we've not reached the end of copy marker.
__ Cmp(x0, scratch);
__ B(le, &loop);
-#else
- __ Bind(&loop);
- // Load the handle.
- __ Ldr(x11, MemOperand(argv, kSystemPointerSize, PostIndex));
- // Dereference the handle.
- __ Ldr(x11, MemOperand(x11));
- // Poke the result into the stack.
- __ Str(x11, MemOperand(scratch, -kSystemPointerSize, PreIndex));
- // Loop if we've not reached the end of copy marker.
- __ Cmp(sp, scratch);
- __ B(lt, &loop);
-#endif
__ Bind(&done);
@@ -1010,35 +941,51 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
OMIT_SMI_CHECK);
}
-static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
- Register args_size = scratch;
-
- // Get the arguments + receiver count.
- __ Ldr(args_size,
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register params_size = scratch1;
+ // Get the size of the formal parameters + receiver (in bytes).
+ __ Ldr(params_size,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ Ldr(args_size.W(),
- FieldMemOperand(args_size, BytecodeArray::kParameterSizeOffset));
+ __ Ldr(params_size.W(),
+ FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ Register actual_params_size = scratch2;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Ldr(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ lsl(actual_params_size, actual_params_size, kSystemPointerSizeLog2);
+ __ Add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+
+ // If the actual argument count is bigger than the formal parameter count,
+ // use it to free up the stack arguments.
+ Label corrected_args_count;
+ __ Cmp(params_size, actual_params_size);
+ __ B(ge, &corrected_args_count);
+ __ Mov(params_size, actual_params_size);
+ __ Bind(&corrected_args_count);
+#endif
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
if (__ emit_debug_code()) {
- __ Tst(args_size, kSystemPointerSize - 1);
+ __ Tst(params_size, kSystemPointerSize - 1);
__ Check(eq, AbortReason::kUnexpectedValue);
}
- __ Lsr(args_size, args_size, kSystemPointerSizeLog2);
- __ DropArguments(args_size);
+ __ Lsr(params_size, params_size, kSystemPointerSizeLog2);
+ __ DropArguments(params_size);
}
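
Without the arguments adaptor, LeaveInterpreterFrame has to free whichever argument area is larger: the formal parameters recorded in the bytecode array or the actual arguments the caller pushed (both counted with the receiver). The rule reduces to a max, sketched here in slots rather than bytes:

#include <algorithm>
#include <cstddef>

size_t SlotsToDrop(size_t formal_param_count, size_t actual_arg_count) {
  size_t params_size = formal_param_count + 1;  // formal parameters + receiver
  size_t actual_size = actual_arg_count + 1;    // actual arguments + receiver
  return std::max(params_size, actual_size);    // drop the bigger of the two
}
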
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ CompareTaggedAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne,
- &no_match);
+ __ CompareAndBranch(actual_marker, Operand(expected_marker), ne, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
@@ -1054,17 +1001,22 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(x1, x3, optimized_code_entry, scratch));
Register closure = x1;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code;
__ LoadTaggedPointerField(
scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ Ldr(scratch.W(),
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
- &found_deoptimized_code);
+ &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
@@ -1079,10 +1031,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ Jump(x17);
}
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ // Optimized code slot contains deoptimized code or code is cleared and
+ // optimized code marker isn't updated. Evict the code, update the marker
+ // and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
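
The rewritten slot handling folds two failure cases into one: a cleared weak reference to the optimized code and code that has been marked for deoptimization now both jump to heal_optimized_code_slot and end up in Runtime::kHealOptimizedCodeSlot. A sketch of that dispatch with simplified types:

struct Code {
  bool marked_for_deoptimization;
};

enum class Action { kTailCallOptimizedCode, kHealOptimizedCodeSlot };

Action DispatchOptimizedCodeSlot(const Code* weak_code /* nullptr if cleared */) {
  if (weak_code == nullptr) {
    // The weak reference was cleared: let the runtime update the marker.
    return Action::kHealOptimizedCodeSlot;
  }
  if (weak_code->marked_for_deoptimization) {
    // Deoptimized code: evict it and update the marker in the runtime.
    return Action::kHealOptimizedCodeSlot;
  }
  // Healthy optimized code: install it on the closure and tail-call it.
  return Action::kTailCallOptimizedCode;
}
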
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
@@ -1092,7 +1045,7 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -- optimization_marker : int32 containing non-zero optimization marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, x1, x3, optimization_marker));
@@ -1109,13 +1062,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
if (FLAG_debug_code) {
- __ CmpTagged(
- optimization_marker,
- Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
+ __ Unreachable();
}
}
@@ -1245,19 +1196,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Cmp(x7, FEEDBACK_VECTOR_TYPE);
__ B(ne, &push_stack_frame);
- // Read off the optimized code slot in the feedback vector, and if there
+ // Read off the optimized state in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
- Register optimized_code_entry = x7;
- __ LoadAnyTaggedField(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ Register optimization_state = w7;
+ __ Ldr(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
- __ CompareTaggedAndBranch(optimized_code_entry,
- Operand(Smi::FromEnum(OptimizationMarker::kNone)),
- ne, &optimized_code_slot_not_empty);
+ // Check if there is optimized code or an optimization marker that needs to be
+ // processed.
+ Label has_optimized_code_or_marker;
+ __ TestAndBranchIfAnySet(
+ optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
+ &has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
@@ -1295,10 +1246,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Push actual argument count, bytecode array, Smi tagged bytecode array
// offset and an undefined (to properly align the stack pointer).
STATIC_ASSERT(TurboAssembler::kExtraSlotClaimedByPrologue == 1);
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ SmiTag(x6, kInterpreterBytecodeOffsetRegister);
- __ Push(kJavaScriptCallArgCountRegister, kInterpreterBytecodeArrayRegister,
- x6, kInterpreterAccumulatorRegister);
+ __ Push(kJavaScriptCallArgCountRegister, kInterpreterBytecodeArrayRegister);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ __ Push(x6, kInterpreterAccumulatorRegister);
// Allocate the local and temporary register file on the stack.
Label stack_overflow;
@@ -1312,7 +1263,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
- LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
__ Cmp(x10, scratch);
}
__ B(lo, &stack_overflow);
@@ -1343,7 +1294,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- LoadStackLimit(masm, x10, StackLimitKind::kInterruptStackLimit);
+ __ LoadStackLimit(x10, StackLimitKind::kInterruptStackLimit);
__ Cmp(sp, x10);
__ B(lo, &stack_check_interrupt);
__ Bind(&after_stack_check_interrupt);
@@ -1385,7 +1336,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&do_return);
// The return value is in x0.
- LeaveInterpreterFrame(masm, x2);
+ LeaveInterpreterFrame(masm, x2, x4);
__ Ret();
__ bind(&stack_check_interrupt);
@@ -1412,19 +1363,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&optimized_code_slot_not_empty);
+ __ bind(&has_optimized_code_or_marker);
+
Label maybe_has_optimized_code;
- // Check if optimized code marker is actually a weak reference to the
- // optimized code as opposed to an optimization marker.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+ // Check if optimized code is available
+ __ TestAndBranchIfAllClear(
+ optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
+ &maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = x7;
+ __ LoadAnyTaggedField(
+ optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
__ bind(&compile_lazy);
@@ -1464,7 +1423,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// Add a stack check before pushing arguments.
Label stack_overflow, done;
- Generate_StackOverflowCheck(masm, slots_to_claim, &stack_overflow);
+ __ StackOverflowCheck(slots_to_claim, &stack_overflow);
__ B(&done);
__ Bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
@@ -1484,7 +1443,6 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ Poke(padreg, Operand(scratch, LSL, kSystemPointerSizeLog2));
}
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ Mov(slots_to_copy, num_args);
__ SlotAddress(stack_addr, 1);
@@ -1513,33 +1471,6 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ LoadRoot(receiver, RootIndex::kUndefinedValue);
__ Poke(receiver, 0);
}
-#else // !V8_REVERSE_JSARGS
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // Store "undefined" as the receiver arg if we need to.
- Register receiver = x14;
- __ LoadRoot(receiver, RootIndex::kUndefinedValue);
- __ SlotAddress(stack_addr, num_args);
- __ Str(receiver, MemOperand(stack_addr));
- __ Mov(slots_to_copy, num_args);
- } else {
- // If we're not given an explicit receiver to store, we'll need to copy it
- // together with the rest of the arguments.
- __ Add(slots_to_copy, num_args, 1);
- }
-
- __ Sub(last_arg_addr, first_arg_index,
- Operand(slots_to_copy, LSL, kSystemPointerSizeLog2));
- __ Add(last_arg_addr, last_arg_addr, kSystemPointerSize);
-
- // Load the final spread argument into spread_arg_out, if necessary.
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Ldr(spread_arg_out, MemOperand(last_arg_addr, -kSystemPointerSize));
- }
-
- // Copy the rest of the arguments.
- __ SlotAddress(stack_addr, 0);
- __ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy);
-#endif // !V8_REVERSE_JSARGS
}
// static
@@ -1764,7 +1695,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ Add(fp, sp, frame_size);
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
__ mov(scratch, x0);
} else {
@@ -1773,12 +1703,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ Str(x0, MemOperand(
fp, BuiltinContinuationFrameConstants::kCallerSPOffset));
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ Str(x0,
- MemOperand(fp, BuiltinContinuationFrameConstants::kCallerSPOffset));
-#endif
}
// Restore registers in pairs.
@@ -1801,7 +1725,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
if (java_script_builtin) __ SmiUntag(kJavaScriptCallArgCountRegister);
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin && with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r0 contains the arguments count, the return value
@@ -1815,7 +1738,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
BuiltinContinuationFrameConstants::kCallerSPOffset /
kSystemPointerSize);
}
-#endif
// Load builtin index (stored as a Smi) and use it to get the builtin start
// address from the builtins table.
@@ -1904,11 +1826,10 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
- // -- sp[0] : argArray (if argc == 2)
+ // -- sp[0] : receiver
// -- sp[8] : thisArg (if argc >= 1)
- // -- sp[16] : receiver
+ // -- sp[16] : argArray (if argc == 2)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
@@ -1925,7 +1846,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 1. Load receiver into x1, argArray into x2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
-#ifdef V8_REVERSE_JSARGS
{
Label done;
__ Mov(this_arg, undefined_value);
@@ -1938,32 +1858,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Peek(arg_array, 2 * kSystemPointerSize);
__ bind(&done);
}
-#else // !V8_REVERSE_JSARGS
- {
- Register scratch = x11;
-
- // Push two undefined values on the stack, to put it in a consistent state
- // so that we can always read three arguments from it.
- __ Push(undefined_value, undefined_value);
-
- // The state of the stack (with arrows pointing to the slots we will read)
- // is as follows:
- //
- // argc = 0 argc = 1 argc = 2
- // -> sp[16]: receiver -> sp[24]: receiver -> sp[32]: receiver
- // -> sp[8]: undefined -> sp[16]: this_arg -> sp[24]: this_arg
- // -> sp[0]: undefined -> sp[8]: undefined -> sp[16]: arg_array
- // sp[0]: undefined sp[8]: undefined
- // sp[0]: undefined
- //
- // There are now always three arguments to read, in the slots starting from
- // slot argc.
- __ SlotAddress(scratch, argc);
- __ Ldp(arg_array, this_arg, MemOperand(scratch));
- __ Ldr(receiver, MemOperand(scratch, 2 * kSystemPointerSize));
- __ Drop(2); // Drop the undefined values we pushed above.
- }
-#endif // !V8_REVERSE_JSARGS
__ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
__ PushArgument(this_arg);
@@ -2022,7 +1916,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
}
Label arguments_ready;
-#ifdef V8_REVERSE_JSARGS
// 3. Shift arguments. It depends if the arguments is even or odd.
// That is if padding exists or not.
{
@@ -2051,30 +1944,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
TurboAssembler::kSrcLessThanDst);
__ Drop(2);
}
-#else // !V8_REVERSE_JSARGS
- // 3. Overwrite the receiver with padding. If argc is odd, this is all we
- // need to do.
- __ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2));
- __ Tbnz(argc, 0, &arguments_ready);
-
- // 4. If argc is even:
- // Copy arguments two slots higher in memory, overwriting the original
- // receiver and padding.
- {
- Register copy_from = x10;
- Register copy_to = x11;
- Register count = x12;
- Register last_arg_slot = x13;
- __ Mov(count, argc);
- __ Sub(last_arg_slot, argc, 1);
- __ SlotAddress(copy_from, last_arg_slot);
- __ Add(copy_to, copy_from, 2 * kSystemPointerSize);
- __ CopyDoubleWords(copy_to, copy_from, count,
- TurboAssembler::kSrcLessThanDst);
- // Drop two slots. These are copies of the last two arguments.
- __ Drop(2);
- }
-#endif // !V8_REVERSE_JSARGS
// 5. Adjust argument count to make the original first argument the new
// receiver and call the callable.
@@ -2085,13 +1954,12 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : argc
- // -- sp[0] : argumentsList (if argc == 3)
- // -- sp[8] : thisArgument (if argc >= 2)
- // -- sp[16] : target (if argc >= 1)
- // -- sp[24] : receiver
+ // -- x0 : argc
+ // -- sp[0] : receiver
+ // -- sp[8] : target (if argc >= 1)
+ // -- sp[16] : thisArgument (if argc >= 2)
+ // -- sp[24] : argumentsList (if argc == 3)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
ASM_LOCATION("Builtins::Generate_ReflectApply");
@@ -2106,7 +1974,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// 1. Load target into x1 (if present), argumentsList into x2 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
-#ifdef V8_REVERSE_JSARGS
{
Label done;
__ Mov(target, undefined_value);
@@ -2122,45 +1989,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Peek(arguments_list, 3 * kSystemPointerSize);
__ bind(&done);
}
-#else // !V8_REVERSE_JSARGS
- {
- // Push four undefined values on the stack, to put it in a consistent state
- // so that we can always read the three arguments we need from it. The
- // fourth value is used for stack alignment.
- __ Push(undefined_value, undefined_value, undefined_value, undefined_value);
-
- // The state of the stack (with arrows pointing to the slots we will read)
- // is as follows:
- //
- // argc = 0 argc = 1 argc = 2
- // sp[32]: receiver sp[40]: receiver sp[48]: receiver
- // -> sp[24]: undefined -> sp[32]: target -> sp[40]: target
- // -> sp[16]: undefined -> sp[24]: undefined -> sp[32]: this_argument
- // -> sp[8]: undefined -> sp[16]: undefined -> sp[24]: undefined
- // sp[0]: undefined sp[8]: undefined sp[16]: undefined
- // sp[0]: undefined sp[8]: undefined
- // sp[0]: undefined
- // argc = 3
- // sp[56]: receiver
- // -> sp[48]: target
- // -> sp[40]: this_argument
- // -> sp[32]: arguments_list
- // sp[24]: undefined
- // sp[16]: undefined
- // sp[8]: undefined
- // sp[0]: undefined
- //
- // There are now always three arguments to read, in the slots starting from
- // slot (argc + 1).
- Register scratch = x10;
- __ SlotAddress(scratch, argc);
- __ Ldp(arguments_list, this_argument,
- MemOperand(scratch, 1 * kSystemPointerSize));
- __ Ldr(target, MemOperand(scratch, 3 * kSystemPointerSize));
-
- __ Drop(4); // Drop the undefined values we pushed above.
- }
-#endif // !V8_REVERSE_JSARGS
__ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
__ PushArgument(this_argument);
@@ -2182,12 +2010,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
- // -- sp[0] : new.target (optional)
- // -- sp[8] : argumentsList
- // -- sp[16] : target
- // -- sp[24] : receiver
+ // -- sp[0] : receiver
+ // -- sp[8] : target
+ // -- sp[16] : argumentsList
+ // -- sp[24] : new.target (optional)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
ASM_LOCATION("Builtins::Generate_ReflectConstruct");
@@ -2203,7 +2030,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// new.target into x3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
-#ifdef V8_REVERSE_JSARGS
{
Label done;
__ Mov(target, undefined_value);
@@ -2220,48 +2046,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Peek(new_target, 3 * kSystemPointerSize);
__ bind(&done);
}
-#else // !V8_REVERSE_JSARGS
- {
- // Push four undefined values on the stack, to put it in a consistent state
- // so that we can always read the three arguments we need from it. The
- // fourth value is used for stack alignment.
- __ Push(undefined_value, undefined_value, undefined_value, undefined_value);
-
- // The state of the stack (with arrows pointing to the slots we will read)
- // is as follows:
- //
- // argc = 0 argc = 1 argc = 2
- // sp[32]: receiver sp[40]: receiver sp[48]: receiver
- // -> sp[24]: undefined -> sp[32]: target -> sp[40]: target
- // -> sp[16]: undefined -> sp[24]: undefined -> sp[32]: arguments_list
- // -> sp[8]: undefined -> sp[16]: undefined -> sp[24]: undefined
- // sp[0]: undefined sp[8]: undefined sp[16]: undefined
- // sp[0]: undefined sp[8]: undefined
- // sp[0]: undefined
- // argc = 3
- // sp[56]: receiver
- // -> sp[48]: target
- // -> sp[40]: arguments_list
- // -> sp[32]: new_target
- // sp[24]: undefined
- // sp[16]: undefined
- // sp[8]: undefined
- // sp[0]: undefined
- //
- // There are now always three arguments to read, in the slots starting from
- // slot (argc + 1).
- Register scratch = x10;
- __ SlotAddress(scratch, argc);
- __ Ldp(new_target, arguments_list,
- MemOperand(scratch, 1 * kSystemPointerSize));
- __ Ldr(target, MemOperand(scratch, 3 * kSystemPointerSize));
-
- __ Cmp(argc, 2);
- __ CmovX(new_target, target, ls); // target if argc <= 2.
-
- __ Drop(4); // Drop the undefined values we pushed above.
- }
-#endif // !V8_REVERSE_JSARGS
__ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
@@ -2319,9 +2103,7 @@ void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// one slot up or one slot down, as needed.
void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
Register len) {
- Label exit;
-#ifdef V8_REVERSE_JSARGS
- Label even;
+ Label exit, even;
Register slots_to_copy = x10;
Register slots_to_claim = x12;
@@ -2353,60 +2135,6 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
__ SlotAddress(dst, 0);
__ CopyDoubleWords(dst, src, slots_to_copy);
}
-#else // !V8_REVERSE_JSARGS
- Label len_odd;
- Register slots_to_copy = x10; // If needed.
- __ Add(slots_to_copy, argc, 1);
- __ Add(argc, argc, len);
- __ Tbnz(len, 0, &len_odd);
- __ Claim(len);
- __ B(&exit);
-
- __ Bind(&len_odd);
- // Claim space we need. If argc is even, slots_to_claim = len + 1, as we need
- // one extra padding slot. If argc is odd, we know that the original arguments
- // will have a padding slot we can reuse (since len is odd), so
- // slots_to_claim = len - 1.
- {
- Register scratch = x11;
- Register slots_to_claim = x12;
- __ Add(slots_to_claim, len, 1);
- __ And(scratch, argc, 1);
- __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
- __ Claim(slots_to_claim);
- }
-
- Label copy_down;
- __ Tbz(slots_to_copy, 0, &copy_down);
-
- // Copy existing arguments one slot up.
- {
- Register src = x11;
- Register dst = x12;
- Register scratch = x13;
- __ Sub(scratch, argc, 1);
- __ SlotAddress(src, scratch);
- __ SlotAddress(dst, argc);
- __ CopyDoubleWords(dst, src, slots_to_copy,
- TurboAssembler::kSrcLessThanDst);
- }
- __ B(&exit);
-
- // Copy existing arguments one slot down and add padding.
- __ Bind(&copy_down);
- {
- Register src = x11;
- Register dst = x12;
- Register scratch = x13;
- __ Add(src, len, 1);
- __ Mov(dst, len); // CopySlots will corrupt dst.
- __ CopySlots(dst, src, slots_to_copy);
- __ Add(scratch, argc, 1);
- __ Poke(padreg,
- Operand(scratch, LSL, kSystemPointerSizeLog2)); // Store padding.
- }
-
-#endif // !V8_REVERSE_JSARGS
__ Bind(&exit);
}
@@ -2446,7 +2174,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Register len = x4;
Label stack_overflow;
- Generate_StackOverflowCheck(masm, len, &stack_overflow);
+ __ StackOverflowCheck(len, &stack_overflow);
// Skip argument setup if we don't need to push any varargs.
Label done;
@@ -2467,7 +2195,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// We do not use the CompareRoot macro as it would do a LoadRoot behind the
// scenes and we want to avoid that in a loop.
// TODO(all): Consider using Ldp and Stp.
-#ifdef V8_REVERSE_JSARGS
Register dst = x16;
__ Add(dst, argc, Immediate(1)); // Consider the receiver as well.
__ SlotAddress(dst, dst);
@@ -2479,15 +2206,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Csel(scratch, scratch, undefined_value, ne);
__ Str(scratch, MemOperand(dst, kSystemPointerSize, PostIndex));
__ Cbnz(len, &loop);
-#else
- __ Bind(&loop);
- __ Sub(len, len, 1);
- __ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
- __ CmpTagged(scratch, the_hole_value);
- __ Csel(scratch, scratch, undefined_value, ne);
- __ Poke(scratch, Operand(len, LSL, kSystemPointerSizeLog2));
- __ Cbnz(len, &loop);
-#endif
}
__ Bind(&done);
// Tail-call to the actual Call or Construct builtin.
@@ -2529,12 +2247,18 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Bind(&new_target_constructor);
}
+ Register args_fp = x5;
+ Register len = x6;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
+ // code is erased.
+ __ Mov(args_fp, fp);
+ __ Ldr(len, MemOperand(fp, StandardFrameConstants::kArgCOffset));
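+ // Without an arguments adaptor frame the argument count can be read directly
+ // from the caller's frame slot, and the arguments themselves live in the
+ // current frame, so args_fp is simply fp.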
+#else
// Check if we have an arguments adaptor frame below the function frame.
// args_fp will point to the frame that contains the actual arguments, which
// will be the current frame unless we have an arguments adaptor frame, in
// which case args_fp points to the arguments adaptor frame.
- Register args_fp = x5;
- Register len = x6;
{
Label arguments_adaptor, arguments_done;
Register scratch = x10;
@@ -2563,19 +2287,19 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
}
__ Bind(&arguments_done);
}
+#endif
Label stack_done, stack_overflow;
__ Subs(len, len, start_index);
__ B(le, &stack_done);
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, x6, &stack_overflow);
+ __ StackOverflowCheck(len, &stack_overflow);
Generate_PrepareForCopyingVarargs(masm, argc, len);
// Push varargs.
{
Register dst = x13;
-#ifdef V8_REVERSE_JSARGS
// Point to the first argument to copy from (skipping receiver).
__ Add(args_fp, args_fp,
CommonFrameConstants::kFixedFrameSizeAboveFp + kSystemPointerSize);
@@ -2586,10 +2310,6 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ SlotAddress(dst, x10);
// Update total number of arguments.
__ Add(argc, argc, len);
-#else
- __ Add(args_fp, args_fp, CommonFrameConstants::kFixedFrameSizeAboveFp);
- __ SlotAddress(dst, 0);
-#endif
__ CopyDoubleWords(dst, args_fp, len);
}
__ B(&stack_done);
@@ -2739,7 +2459,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
Label done;
- LoadStackLimit(masm, x10, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(x10, StackLimitKind::kRealStackLimit);
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
__ Sub(x10, sp, x10);
@@ -2750,7 +2470,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
Label copy_bound_args;
Register total_argc = x15;
Register slots_to_claim = x12;
@@ -2826,80 +2545,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
}
// Update argc.
__ Mov(argc, total_argc);
-#else // !V8_REVERSE_JSARGS
- // Check if we need padding.
- Label copy_args, copy_bound_args;
- Register total_argc = x15;
- Register slots_to_claim = x12;
- __ Add(total_argc, argc, bound_argc);
- __ Mov(slots_to_claim, bound_argc);
- __ Tbz(bound_argc, 0, &copy_args);
-
- // Load receiver before we start moving the arguments. We will only
- // need this in this path because the bound arguments are odd.
- Register receiver = x14;
- __ Peek(receiver, Operand(argc, LSL, kSystemPointerSizeLog2));
-
- // Claim space we need. If argc is even, slots_to_claim = bound_argc + 1,
- // as we need one extra padding slot. If argc is odd, we know that the
- // original arguments will have a padding slot we can reuse (since
- // bound_argc is odd), so slots_to_claim = bound_argc - 1.
- {
- Register scratch = x11;
- __ Add(slots_to_claim, bound_argc, 1);
- __ And(scratch, total_argc, 1);
- __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
- }
-
- // Copy bound arguments.
- __ Bind(&copy_args);
- // Skip claim and copy of existing arguments in the special case where we
- // do not need to claim any slots (this will be the case when
- // bound_argc == 1 and the existing arguments have padding we can reuse).
- __ Cbz(slots_to_claim, &copy_bound_args);
- __ Claim(slots_to_claim);
- {
- Register count = x10;
- // Relocate arguments to a lower address.
- __ Mov(count, argc);
- __ CopySlots(0, slots_to_claim, count);
-
- __ Bind(&copy_bound_args);
- // Copy [[BoundArguments]] to the stack (below the arguments). The first
- // element of the array is copied to the highest address.
- {
- Label loop;
- Register counter = x10;
- Register scratch = x11;
- Register copy_to = x12;
- __ Add(bound_argv, bound_argv,
- FixedArray::kHeaderSize - kHeapObjectTag);
- __ SlotAddress(copy_to, argc);
- __ Add(argc, argc,
- bound_argc); // Update argc to include bound arguments.
- __ Lsl(counter, bound_argc, kTaggedSizeLog2);
- __ Bind(&loop);
- __ Sub(counter, counter, kTaggedSize);
- __ LoadAnyTaggedField(scratch, MemOperand(bound_argv, counter));
- // Poke into claimed area of stack.
- __ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex));
- __ Cbnz(counter, &loop);
- }
-
- {
- Label done;
- Register scratch = x10;
- __ Tbz(bound_argc, 0, &done);
- // Store receiver.
- __ Add(scratch, sp, Operand(total_argc, LSL, kSystemPointerSizeLog2));
- __ Str(receiver, MemOperand(scratch, kSystemPointerSize, PostIndex));
- __ Tbnz(total_argc, 0, &done);
- // Store padding.
- __ Str(padreg, MemOperand(scratch));
- __ Bind(&done);
- }
- }
-#endif // !V8_REVERSE_JSARGS
}
__ Bind(&no_bound_arguments);
}
@@ -3160,26 +2805,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Cmp(argc_expected, kDontAdaptArgumentsSentinel);
__ B(eq, &dont_adapt_arguments);
-#ifndef V8_REVERSE_JSARGS
- // This optimization is disabled when the arguments are reversed.
- Label adapt_arguments_in_place;
- Register argc_actual_minus_expected = x5;
-
- // When the difference between argc_actual and argc_expected is odd, we
- // create an arguments adaptor frame.
- __ Sub(argc_actual_minus_expected, argc_actual, argc_expected);
- __ Tbnz(argc_actual_minus_expected, 0, &create_adaptor_frame);
-
- // When the difference is even, check if we are allowed to adjust the
- // existing frame instead.
- __ LoadTaggedPointerField(
- x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
- __ TestAndBranchIfAnySet(
- w4, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask,
- &adapt_arguments_in_place);
-#endif
-
// -------------------------------------------
// Create an arguments adaptor frame.
// -------------------------------------------
@@ -3198,7 +2823,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// receiver.
__ RecordComment("-- Stack check --");
__ Add(scratch1, argc_expected, 1);
- Generate_StackOverflowCheck(masm, scratch1, &stack_overflow);
+ __ StackOverflowCheck(scratch1, &stack_overflow);
// Round up number of slots to be even, to maintain stack alignment.
__ RecordComment("-- Allocate callee frame slots --");
@@ -3206,7 +2831,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bic(scratch1, scratch1, 1);
__ Claim(scratch1, kSystemPointerSize);
-#ifdef V8_REVERSE_JSARGS
// If we don't have enough arguments, fill the remaining expected
// arguments with undefined, otherwise skip this step.
Label enough_arguments;
@@ -3251,84 +2875,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Add(copy_from, fp, 2 * kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, argc_to_copy);
-#else // !V8_REVERSE_JSARGS
- Register argc_unused_actual = x14;
- Register scratch2 = x16;
-
- // Preparing the expected arguments is done in four steps, the order of
- // which is chosen so we can use LDP/STP and avoid conditional branches as
- // much as possible.
-
- __ Mov(copy_to, sp);
-
- // (1) If we don't have enough arguments, fill the remaining expected
- // arguments with undefined, otherwise skip this step.
- Label enough_arguments;
- __ Subs(scratch1, argc_actual, argc_expected);
- __ Csel(argc_unused_actual, xzr, scratch1, lt);
- __ Csel(argc_to_copy, argc_expected, argc_actual, ge);
- __ B(ge, &enough_arguments);
-
- // Fill the remaining expected arguments with undefined.
- __ RecordComment("-- Fill slots with undefined --");
- __ Sub(copy_end, copy_to, Operand(scratch1, LSL, kSystemPointerSizeLog2));
- __ LoadRoot(scratch1, RootIndex::kUndefinedValue);
-
- Label fill;
- __ Bind(&fill);
- __ Stp(scratch1, scratch1,
- MemOperand(copy_to, 2 * kSystemPointerSize, PostIndex));
- // We might write one slot extra, but that is ok because we'll overwrite it
- // below.
- __ Cmp(copy_end, copy_to);
- __ B(hi, &fill);
-
- // Correct copy_to, for the case where we wrote one additional slot.
- __ Mov(copy_to, copy_end);
-
- __ Bind(&enough_arguments);
- // (2) Copy all of the actual arguments, or as many as we need.
- Label skip_copy;
- __ RecordComment("-- Copy actual arguments --");
- __ Cbz(argc_to_copy, &skip_copy);
- __ Add(copy_end, copy_to,
- Operand(argc_to_copy, LSL, kSystemPointerSizeLog2));
- __ Add(copy_from, fp, 2 * kSystemPointerSize);
- // Adjust for difference between actual and expected arguments.
- __ Add(copy_from, copy_from,
- Operand(argc_unused_actual, LSL, kSystemPointerSizeLog2));
-
- // Copy arguments. We use load/store pair instructions, so we might
- // overshoot by one slot, but since we copy the arguments starting from the
- // last one, if we do overshoot, the extra slot will be overwritten later by
- // the receiver.
- Label copy_2_by_2;
- __ Bind(&copy_2_by_2);
- __ Ldp(scratch1, scratch2,
- MemOperand(copy_from, 2 * kSystemPointerSize, PostIndex));
- __ Stp(scratch1, scratch2,
- MemOperand(copy_to, 2 * kSystemPointerSize, PostIndex));
- __ Cmp(copy_end, copy_to);
- __ B(hi, &copy_2_by_2);
- __ Bind(&skip_copy);
-
- // (3) Store padding, which might be overwritten by the receiver, if it is
- // not necessary.
- __ RecordComment("-- Store padding --");
- __ Str(padreg, MemOperand(fp, -5 * kSystemPointerSize));
-
- // (4) Store receiver. Calculate target address from the sp to avoid
- // checking for padding. Storing the receiver will overwrite either the
- // extra slot we copied with the actual arguments, if we did copy one, or
- // the padding we stored above.
- __ RecordComment("-- Store receiver --");
- __ Add(copy_from, fp, 2 * kSystemPointerSize);
- __ Ldr(scratch1,
- MemOperand(copy_from, argc_actual, LSL, kSystemPointerSizeLog2));
- __ Str(scratch1,
- MemOperand(sp, argc_expected, LSL, kSystemPointerSizeLog2));
-#endif
-
// Arguments have been adapted. Now call the entry point.
__ RecordComment("-- Call entry point --");
__ Mov(argc_actual, argc_expected);
@@ -3349,46 +2895,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Ret();
}
-#ifndef V8_REVERSE_JSARGS
- // -----------------------------------------
- // Adapt arguments in the existing frame.
- // -----------------------------------------
- __ Bind(&adapt_arguments_in_place);
- {
- __ RecordComment("-- Update arguments in place --");
- // The callee cannot observe the actual arguments, so it's safe to just
- // pass the expected arguments by massaging the stack appropriately. See
- // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
- Label under_application, over_application;
- __ Tbnz(argc_actual_minus_expected, kXSignBit, &under_application);
-
- __ Bind(&over_application);
- {
- // Remove superfluous arguments from the stack. The number of superflous
- // arguments is even.
- __ RecordComment("-- Over-application --");
- __ Mov(argc_actual, argc_expected);
- __ Drop(argc_actual_minus_expected);
- __ B(&dont_adapt_arguments);
- }
-
- __ Bind(&under_application);
- {
- // Fill remaining expected arguments with undefined values.
- __ RecordComment("-- Under-application --");
- Label fill;
- Register undef_value = x16;
- __ LoadRoot(undef_value, RootIndex::kUndefinedValue);
- __ Bind(&fill);
- __ Add(argc_actual, argc_actual, 2);
- __ Push(undef_value, undef_value);
- __ Cmp(argc_actual, argc_expected);
- __ B(lt, &fill);
- __ B(&dont_adapt_arguments);
- }
- }
-#endif
-
// -------------------------------------------
// Dont adapt arguments.
// -------------------------------------------
@@ -3915,12 +3421,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- x2 : arguments count (not including the receiver)
// -- x3 : call data
// -- x0 : holder
- // -- sp[0] : last argument
+ // -- sp[0] : receiver
+ // -- sp[8] : first argument
// -- ...
- // -- sp[(argc - 1) * 8] : first argument
- // -- sp[(argc + 0) * 8] : receiver
+ // -- sp[(argc) * 8] : last argument
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
Register api_function_address = x1;
Register argc = x2;
@@ -3990,14 +3495,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ Add(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
-#else
- __ Add(scratch, scratch,
- Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
- __ Add(scratch, scratch, Operand(argc, LSL, kSystemPointerSizeLog2));
-#endif
__ Str(scratch, MemOperand(sp, 2 * kSystemPointerSize));
// FunctionCallbackInfo::length_.
@@ -4128,6 +3627,303 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ Ret();
}
+namespace {
+
+void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
+ int dst_offset, const CPURegList& reg_list,
+ const Register& temp0, const Register& temp1,
+ int src_offset = 0) {
+ DCHECK_EQ(reg_list.Count() % 2, 0);
+ UseScratchRegisterScope temps(masm);
+ CPURegList copy_to_input = reg_list;
+ int reg_size = reg_list.RegisterSizeInBytes();
+ DCHECK_EQ(temp0.SizeInBytes(), reg_size);
+ DCHECK_EQ(temp1.SizeInBytes(), reg_size);
+
+ // Compute some temporary addresses to avoid having the macro assembler set
+ // up a temp with an offset for accesses out of the range of the addressing
+ // mode.
+ Register src = temps.AcquireX();
+ masm->Add(src, sp, src_offset);
+ masm->Add(dst, dst, dst_offset);
+
+ // Write reg_list into the frame pointed to by dst.
+ for (int i = 0; i < reg_list.Count(); i += 2) {
+ masm->Ldp(temp0, temp1, MemOperand(src, i * reg_size));
+
+ CPURegister reg0 = copy_to_input.PopLowestIndex();
+ CPURegister reg1 = copy_to_input.PopLowestIndex();
+ int offset0 = reg0.code() * reg_size;
+ int offset1 = reg1.code() * reg_size;
+
+ // Pair up adjacent stores, otherwise write them separately.
+ if (offset1 == offset0 + reg_size) {
+ masm->Stp(temp0, temp1, MemOperand(dst, offset0));
+ } else {
+ masm->Str(temp0, MemOperand(dst, offset0));
+ masm->Str(temp1, MemOperand(dst, offset1));
+ }
+ }
+ masm->Sub(dst, dst, dst_offset);
+}
+
+void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
+ const Register& src_base, int src_offset) {
+ DCHECK_EQ(reg_list.Count() % 2, 0);
+ UseScratchRegisterScope temps(masm);
+ CPURegList restore_list = reg_list;
+ int reg_size = restore_list.RegisterSizeInBytes();
+
+ // Compute a temporary address to avoid having the macro assembler set
+ // up a temp with an offset for accesses out of the range of the addressing
+ // mode.
+ Register src = temps.AcquireX();
+ masm->Add(src, src_base, src_offset);
+
+ // No need to restore padreg.
+ restore_list.Remove(padreg);
+
+ // Restore every register in restore_list from src.
+ while (!restore_list.IsEmpty()) {
+ CPURegister reg0 = restore_list.PopLowestIndex();
+ CPURegister reg1 = restore_list.PopLowestIndex();
+ int offset0 = reg0.code() * reg_size;
+
+ if (reg1 == NoCPUReg) {
+ masm->Ldr(reg0, MemOperand(src, offset0));
+ break;
+ }
+
+ int offset1 = reg1.code() * reg_size;
+
+ // Pair up adjacent loads, otherwise read them separately.
+ if (offset1 == offset0 + reg_size) {
+ masm->Ldp(reg0, reg1, MemOperand(src, offset0));
+ } else {
+ masm->Ldr(reg0, MemOperand(src, offset0));
+ masm->Ldr(reg1, MemOperand(src, offset1));
+ }
+ }
+}
+
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // TODO(all): This code needs to be revisited. We probably only need to save
+ // caller-saved registers here. Callee-saved registers can be stored directly
+ // in the input frame.
+
+ // Save all allocatable double registers.
+ CPURegList saved_double_registers(
+ CPURegister::kVRegister, kDRegSizeInBits,
+ RegisterConfiguration::Default()->allocatable_double_codes_mask());
+ DCHECK_EQ(saved_double_registers.Count() % 2, 0);
+ __ PushCPURegList(saved_double_registers);
+
+ // We save all the registers except sp, lr, platform register (x18) and the
+ // masm scratches.
+ CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 28);
+ saved_registers.Remove(ip0);
+ saved_registers.Remove(ip1);
+ saved_registers.Remove(x18);
+ saved_registers.Combine(fp);
+ saved_registers.Align();
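+ // Align() is expected to pad the list to an even register count so that the
+ // paired pushes below keep sp 16-byte aligned (see the DCHECK that follows).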
+ DCHECK_EQ(saved_registers.Count() % 2, 0);
+ __ PushCPURegList(saved_registers);
+
+ __ Mov(x3, Operand(ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, isolate)));
+ __ Str(fp, MemOperand(x3));
+
+ const int kSavedRegistersAreaSize =
+ (saved_registers.Count() * kXRegSize) +
+ (saved_double_registers.Count() * kDRegSize);
+
+ // Floating point registers are saved on the stack above core registers.
+ const int kDoubleRegistersOffset = saved_registers.Count() * kXRegSize;
+
+ Register bailout_id = x2;
+ Register code_object = x3;
+ Register fp_to_sp = x4;
+ __ Mov(bailout_id, Deoptimizer::kFixedExitSizeMarker);
+ // Get the address of the location in the code object. This is the return
+ // address for lazy deoptimization.
+ __ Mov(code_object, lr);
+ // Compute the fp-to-sp delta.
+ __ Add(fp_to_sp, sp, kSavedRegistersAreaSize);
+ __ Sub(fp_to_sp, fp, fp_to_sp);
+
+ // Allocate a new deoptimizer object.
+ __ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+
+ // Ensure we can safely load from below fp.
+ DCHECK_GT(kSavedRegistersAreaSize, -StandardFrameConstants::kFunctionOffset);
+ __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+
+ // If x1 is a smi, zero x0.
+ __ Tst(x1, kSmiTagMask);
+ __ CzeroX(x0, eq);
+
+ __ Mov(x1, static_cast<int>(deopt_kind));
+ // Following arguments are already loaded:
+ // - x2: bailout id
+ // - x3: code object address
+ // - x4: fp-to-sp delta
+ __ Mov(x5, ExternalReference::isolate_address(isolate));
+
+ {
+ // Call Deoptimizer::New().
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve "deoptimizer" object in register x0.
+ Register deoptimizer = x0;
+
+ // Get the input frame descriptor pointer.
+ __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
+
+ // Copy core registers into the input frame.
+ CopyRegListToFrame(masm, x1, FrameDescription::registers_offset(),
+ saved_registers, x2, x3);
+
+ // Copy double registers to the input frame.
+ CopyRegListToFrame(masm, x1, FrameDescription::double_registers_offset(),
+ saved_double_registers, x2, x3, kDoubleRegistersOffset);
+
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.AcquireX();
+ __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ strb(xzr, MemOperand(is_iterable));
+ }
+
+ // Remove the saved registers from the stack.
+ DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
+ __ Drop(kSavedRegistersAreaSize / kXRegSize);
+
+ // Compute a pointer to the unwinding limit in register x2; that is
+ // the first stack slot not part of the input frame.
+ Register unwind_limit = x2;
+ __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Add(x3, x1, FrameDescription::frame_content_offset());
+ __ SlotAddress(x1, 0);
+ __ Lsr(unwind_limit, unwind_limit, kSystemPointerSizeLog2);
+ __ Mov(x5, unwind_limit);
+ __ CopyDoubleWords(x3, x1, x5);
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // Since {unwind_limit} is the frame size up to the parameter count, we might
+ // end up with an unaligned stack pointer. This is later recovered when
+ // setting the stack pointer to {caller_frame_top_offset}.
+ __ Bic(unwind_limit, unwind_limit, 1);
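+ // Bic with 1 clears the low bit, rounding the slot count down to an even
+ // number, presumably so that the Drop below keeps sp 16-byte aligned.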
+#endif
+ __ Drop(unwind_limit);
+
+ // Compute the output frame in the deoptimizer.
+ __ Push(padreg, x0); // Preserve deoptimizer object across call.
+ {
+ // Call Deoptimizer::ComputeOutputFrames().
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer).
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+ __ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
+ __ Mov(sp, scratch);
+ }
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, outer_loop_header;
+ __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
+ __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
+ __ Add(x1, x0, Operand(x1, LSL, kSystemPointerSizeLog2));
+ __ B(&outer_loop_header);
+
+ __ Bind(&outer_push_loop);
+ Register current_frame = x2;
+ Register frame_size = x3;
+ __ Ldr(current_frame, MemOperand(x0, kSystemPointerSize, PostIndex));
+ __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
+ __ Lsr(frame_size, x3, kSystemPointerSizeLog2);
+ __ Claim(frame_size);
+
+ __ Add(x7, current_frame, FrameDescription::frame_content_offset());
+ __ SlotAddress(x6, 0);
+ __ CopyDoubleWords(x6, x7, frame_size);
+
+ __ Bind(&outer_loop_header);
+ __ Cmp(x0, x1);
+ __ B(lt, &outer_push_loop);
+
+ __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
+ RestoreRegList(masm, saved_double_registers, x1,
+ FrameDescription::double_registers_offset());
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.AcquireX();
+ Register one = x4;
+ __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ Mov(one, Operand(1));
+ __ strb(one, MemOperand(is_iterable));
+ }
+
+ // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
+ // stack, then pops it all into registers. Here, we try to load it directly
+ // into the relevant registers. Is this correct? If so, we should improve the
+ // ARM code.
+
+ // Restore registers from the last output frame.
+ // Note that lr is not in the list of saved_registers and will be restored
+ // later. We can use it to hold the address of the last output frame while
+ // reloading the other registers.
+ DCHECK(!saved_registers.IncludesAliasOf(lr));
+ Register last_output_frame = lr;
+ __ Mov(last_output_frame, current_frame);
+
+ RestoreRegList(masm, saved_registers, last_output_frame,
+ FrameDescription::registers_offset());
+
+ UseScratchRegisterScope temps(masm);
+ temps.Exclude(x17);
+ Register continuation = x17;
+ __ Ldr(continuation, MemOperand(last_output_frame,
+ FrameDescription::continuation_offset()));
+ __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ __ Autibsp();
+#endif
+ __ Br(continuation);
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index eed7bc6e97..e068cfa9d1 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -28,7 +28,7 @@ type never;
type Tagged generates 'TNode<MaybeObject>' constexpr 'MaybeObject';
type StrongTagged extends Tagged
- generates 'TNode<Object>' constexpr 'ObjectPtr';
+ generates 'TNode<Object>' constexpr 'Object';
type Smi extends StrongTagged generates 'TNode<Smi>' constexpr 'Smi';
type TaggedIndex extends StrongTagged
generates 'TNode<TaggedIndex>' constexpr 'TaggedIndex';
@@ -50,10 +50,11 @@ type Zero extends PositiveSmi;
type Uninitialized extends Tagged;
extern macro MakeWeak(HeapObject): WeakHeapObject;
-extern macro GetHeapObjectAssumeWeak(WeakHeapObject):
- HeapObject labels ClearedWeakPointer;
+extern macro GetHeapObjectAssumeWeak(MaybeObject): HeapObject labels IfCleared;
+extern macro GetHeapObjectIfStrong(MaybeObject): HeapObject labels IfNotStrong;
extern macro IsWeakOrCleared(MaybeObject): bool;
extern macro IsWeakReferenceToObject(MaybeObject, Object): bool;
+extern macro IsStrong(MaybeObject): bool;
macro StrongToWeak<T: type>(x: T): Weak<T> {
return %RawDownCast<Weak<T>>(MakeWeak(x));
@@ -110,7 +111,7 @@ type bint generates 'TNode<BInt>' constexpr 'BInt';
type string constexpr 'const char*';
// A Smi value containing a bitfield struct as its integer data.
-type SmiTagged<T : type extends uint31> extends Smi;
+@useParentTypeChecker type SmiTagged<T : type extends uint31> extends Smi;
// WARNING: The memory representation (i.e., in class fields and arrays) of
// float64_or_hole is just a float64 that may be the hole-representing
@@ -149,7 +150,7 @@ type ObjectHashTable extends HashTable
generates 'TNode<ObjectHashTable>';
extern class NumberDictionary extends HashTable;
-type RawPtr generates 'TNode<RawPtrT>' constexpr 'void*';
+type RawPtr generates 'TNode<RawPtrT>' constexpr 'Address';
type ExternalPointer
generates 'TNode<ExternalPointerT>' constexpr 'ExternalPointer_t';
extern class Code extends HeapObject;
@@ -166,6 +167,9 @@ type LayoutDescriptor extends ByteArray
generates 'TNode<LayoutDescriptor>';
extern class TransitionArray extends WeakFixedArray;
+extern operator '.length_intptr' macro LoadAndUntagWeakFixedArrayLength(
+ WeakFixedArray): intptr;
+
type InstanceType extends uint16 constexpr 'InstanceType';
type NoSharedNameSentinel extends Smi;
@@ -278,6 +282,7 @@ extern enum MessageTemplate {
kFirstArgumentNotRegExp,
kBigIntMixedTypes,
kTypedArrayTooShort,
+ kTypedArrayTooLargeToSort,
kInvalidCountValue,
kConstructorNotFunction,
kSymbolToString,
@@ -320,7 +325,6 @@ extern enum MessageTemplate {
kWasmTrapDivUnrepresentable,
kWasmTrapRemByZero,
kWasmTrapFloatUnrepresentable,
- kWasmTrapFuncInvalid,
kWasmTrapFuncSigMismatch,
kWasmTrapDataSegmentDropped,
kWasmTrapElemSegmentDropped,
@@ -330,7 +334,6 @@ extern enum MessageTemplate {
kWasmTrapNullDereference,
kWasmTrapIllegalCast,
kWasmTrapArrayOutOfBounds,
- kWasmTrapWasmJSFunction,
kWeakRefsRegisterTargetAndHoldingsMustNotBeSame,
kWeakRefsRegisterTargetMustBeObject,
kWeakRefsUnregisterTokenMustBeObject,
@@ -831,6 +834,10 @@ extern operator '==' macro
ConstexprInt31Equal(constexpr int31, constexpr int31): constexpr bool;
extern operator '!=' macro
ConstexprInt31NotEqual(constexpr int31, constexpr int31): constexpr bool;
+extern operator '==' macro
+ConstexprUint32Equal(constexpr uint32, constexpr uint32): constexpr bool;
+extern operator '!=' macro
+ConstexprUint32NotEqual(constexpr uint32, constexpr uint32): constexpr bool;
extern operator '>=' macro
ConstexprInt31GreaterThanEqual(
constexpr int31, constexpr int31): constexpr bool;
@@ -1555,6 +1562,7 @@ namespace runtime {
extern runtime
GetDerivedMap(Context, JSFunction, JSReceiver): Map;
}
+extern macro IsDeprecatedMap(Map): bool;
transitioning builtin FastCreateDataProperty(implicit context: Context)(
receiver: JSReceiver, key: JSAny, value: JSAny): Object {
@@ -1703,3 +1711,9 @@ struct ConstantIterator<T: type> {
macro ConstantIterator<T: type>(value: T): ConstantIterator<T> {
return ConstantIterator{value};
}
+
+extern macro FeedbackIteratorSizeFor(constexpr int32): intptr;
+extern macro FeedbackIteratorMapIndexForEntry(constexpr int32): intptr;
+extern macro FeedbackIteratorHandlerIndexForEntry(constexpr int32): intptr;
+extern operator '[]' macro LoadWeakFixedArrayElement(
+ WeakFixedArray, intptr): MaybeObject;
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index 6eb6f87c74..e42760d4d2 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -99,7 +99,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
}
}
- Object raw_call_data = fun_data->call_code();
+ Object raw_call_data = fun_data->call_code(kAcquireLoad);
if (!raw_call_data.IsUndefined(isolate)) {
DCHECK(raw_call_data.IsCallHandlerInfo());
CallHandlerInfo call_data = CallHandlerInfo::cast(raw_call_data);
@@ -206,7 +206,6 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
} else {
argv = new Address[frame_argc];
}
-#ifdef V8_REVERSE_JSARGS
argv[BuiltinArguments::kNewTargetOffset] = new_target->ptr();
argv[BuiltinArguments::kTargetOffset] = function->ptr();
argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc).ptr();
@@ -217,19 +216,6 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
for (int i = 0; i < argc; ++i) {
argv[cursor++] = args[i]->ptr();
}
-#else
- int cursor = frame_argc - 1;
- argv[cursor--] = receiver->ptr();
- for (int i = 0; i < argc; ++i) {
- argv[cursor--] = args[i]->ptr();
- }
- DCHECK_EQ(cursor, BuiltinArguments::kPaddingOffset);
- argv[BuiltinArguments::kPaddingOffset] =
- ReadOnlyRoots(isolate).the_hole_value().ptr();
- argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc).ptr();
- argv[BuiltinArguments::kTargetOffset] = function->ptr();
- argv[BuiltinArguments::kNewTargetOffset] = new_target->ptr();
-#endif
MaybeHandle<Object> result;
{
RelocatableArguments arguments(isolate, frame_argc, &argv[frame_argc - 1]);
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 134baeb96e..7a8ee5c415 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -221,10 +221,9 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
}
TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ CSA_ASSERT(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
@@ -246,11 +245,12 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
TNode<JSArray> array_receiver = CAST(receiver);
TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(array_receiver));
Label return_undefined(this), fast_elements(this);
- GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
// 2) Ensure that the length is writable.
EnsureArrayLengthWritable(context, LoadMap(array_receiver), &runtime);
+ GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
+
// 3) Check that the elements backing store isn't copy-on-write.
TNode<FixedArrayBase> elements = LoadElements(array_receiver);
GotoIf(TaggedEqual(LoadMap(elements), FixedCOWArrayMapConstant()),
@@ -321,10 +321,9 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
Label double_transition(this);
Label runtime(this, Label::kDeferred);
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ CSA_ASSERT(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
@@ -438,10 +437,10 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
}
TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSArray> array = CAST(Parameter(Descriptor::kSource));
- TNode<BInt> begin = SmiToBInt(CAST(Parameter(Descriptor::kBegin)));
- TNode<BInt> count = SmiToBInt(CAST(Parameter(Descriptor::kCount)));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto array = Parameter<JSArray>(Descriptor::kSource);
+ TNode<BInt> begin = SmiToBInt(Parameter<Smi>(Descriptor::kBegin));
+ TNode<BInt> count = SmiToBInt(Parameter<Smi>(Descriptor::kCount));
CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));
@@ -449,8 +448,8 @@ TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) {
}
TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSArray> array = CAST(Parameter(Descriptor::kSource));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto array = Parameter<JSArray>(Descriptor::kSource);
CSA_ASSERT(this,
Word32Or(Word32BinaryNot(IsHoleyFastElementsKindForRead(
@@ -468,8 +467,8 @@ TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) {
// - If there are holes in the source, the ElementsKind of the "copy" will be
// PACKED_ELEMENTS (such that undefined can be stored).
TF_BUILTIN(CloneFastJSArrayFillingHoles, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSArray> array = CAST(Parameter(Descriptor::kSource));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto array = Parameter<JSArray>(Descriptor::kSource);
CSA_ASSERT(this,
Word32Or(Word32BinaryNot(IsHoleyFastElementsKindForRead(
@@ -543,9 +542,9 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> callbackfn = args.GetOptionalArgumentValue(0);
TNode<Object> this_arg = args.GetOptionalArgumentValue(1);
@@ -1068,28 +1067,28 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(
TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
Generate(kIncludes, argc, context);
}
TF_BUILTIN(ArrayIncludesSmiOrObject, ArrayIncludesIndexofAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<FixedArray> elements = CAST(Parameter(Descriptor::kElements));
- TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto elements = Parameter<FixedArray>(Descriptor::kElements);
+ auto search_element = Parameter<Object>(Descriptor::kSearchElement);
+ auto array_length = Parameter<Smi>(Descriptor::kLength);
+ auto from_index = Parameter<Smi>(Descriptor::kFromIndex);
GenerateSmiOrObject(kIncludes, context, elements, search_element,
array_length, from_index);
}
TF_BUILTIN(ArrayIncludesPackedDoubles, ArrayIncludesIndexofAssembler) {
- TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements));
- TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
+ auto elements = Parameter<FixedArrayBase>(Descriptor::kElements);
+ auto search_element = Parameter<Object>(Descriptor::kSearchElement);
+ auto array_length = Parameter<Smi>(Descriptor::kLength);
+ auto from_index = Parameter<Smi>(Descriptor::kFromIndex);
ReturnIfEmpty(array_length, FalseConstant());
GeneratePackedDoubles(kIncludes, CAST(elements), search_element, array_length,
@@ -1097,10 +1096,10 @@ TF_BUILTIN(ArrayIncludesPackedDoubles, ArrayIncludesIndexofAssembler) {
}
TF_BUILTIN(ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) {
- TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements));
- TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
+ auto elements = Parameter<FixedArrayBase>(Descriptor::kElements);
+ auto search_element = Parameter<Object>(Descriptor::kSearchElement);
+ auto array_length = Parameter<Smi>(Descriptor::kLength);
+ auto from_index = Parameter<Smi>(Descriptor::kFromIndex);
ReturnIfEmpty(array_length, FalseConstant());
GenerateHoleyDoubles(kIncludes, CAST(elements), search_element, array_length,
@@ -1109,28 +1108,28 @@ TF_BUILTIN(ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) {
TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
Generate(kIndexOf, argc, context);
}
TF_BUILTIN(ArrayIndexOfSmiOrObject, ArrayIncludesIndexofAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<FixedArray> elements = CAST(Parameter(Descriptor::kElements));
- TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto elements = Parameter<FixedArray>(Descriptor::kElements);
+ auto search_element = Parameter<Object>(Descriptor::kSearchElement);
+ auto array_length = Parameter<Smi>(Descriptor::kLength);
+ auto from_index = Parameter<Smi>(Descriptor::kFromIndex);
GenerateSmiOrObject(kIndexOf, context, elements, search_element, array_length,
from_index);
}
TF_BUILTIN(ArrayIndexOfPackedDoubles, ArrayIncludesIndexofAssembler) {
- TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements));
- TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
+ auto elements = Parameter<FixedArrayBase>(Descriptor::kElements);
+ auto search_element = Parameter<Object>(Descriptor::kSearchElement);
+ auto array_length = Parameter<Smi>(Descriptor::kLength);
+ auto from_index = Parameter<Smi>(Descriptor::kFromIndex);
ReturnIfEmpty(array_length, NumberConstant(-1));
GeneratePackedDoubles(kIndexOf, CAST(elements), search_element, array_length,
@@ -1138,10 +1137,10 @@ TF_BUILTIN(ArrayIndexOfPackedDoubles, ArrayIncludesIndexofAssembler) {
}
TF_BUILTIN(ArrayIndexOfHoleyDoubles, ArrayIncludesIndexofAssembler) {
- TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements));
- TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
+ auto elements = Parameter<FixedArrayBase>(Descriptor::kElements);
+ auto search_element = Parameter<Object>(Descriptor::kSearchElement);
+ auto array_length = Parameter<Smi>(Descriptor::kLength);
+ auto from_index = Parameter<Smi>(Descriptor::kFromIndex);
ReturnIfEmpty(array_length, NumberConstant(-1));
GenerateHoleyDoubles(kIndexOf, CAST(elements), search_element, array_length,
@@ -1150,24 +1149,24 @@ TF_BUILTIN(ArrayIndexOfHoleyDoubles, ArrayIncludesIndexofAssembler) {
// ES #sec-array.prototype.values
TF_BUILTIN(ArrayPrototypeValues, CodeStubAssembler) {
- TNode<NativeContext> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<NativeContext>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Return(CreateArrayIterator(context, ToObject_Inline(context, receiver),
IterationKind::kValues));
}
// ES #sec-array.prototype.entries
TF_BUILTIN(ArrayPrototypeEntries, CodeStubAssembler) {
- TNode<NativeContext> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<NativeContext>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Return(CreateArrayIterator(context, ToObject_Inline(context, receiver),
IterationKind::kEntries));
}
// ES #sec-array.prototype.keys
TF_BUILTIN(ArrayPrototypeKeys, CodeStubAssembler) {
- TNode<NativeContext> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<NativeContext>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Return(CreateArrayIterator(context, ToObject_Inline(context, receiver),
IterationKind::kKeys));
}
@@ -1176,8 +1175,8 @@ TF_BUILTIN(ArrayPrototypeKeys, CodeStubAssembler) {
TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
const char* method_name = "Array Iterator.prototype.next";
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> maybe_iterator = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_iterator = Parameter<Object>(Descriptor::kReceiver);
TVARIABLE(Oddball, var_done, TrueConstant());
TVARIABLE(Object, var_value, UndefinedConstant());
@@ -1504,12 +1503,12 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSReceiver> target = CAST(Parameter(Descriptor::kTarget));
- TNode<JSReceiver> source = CAST(Parameter(Descriptor::kSource));
- TNode<Number> source_length = CAST(Parameter(Descriptor::kSourceLength));
- TNode<Number> start = CAST(Parameter(Descriptor::kStart));
- TNode<Number> depth = CAST(Parameter(Descriptor::kDepth));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto target = Parameter<JSReceiver>(Descriptor::kTarget);
+ auto source = Parameter<JSReceiver>(Descriptor::kSource);
+ auto source_length = Parameter<Number>(Descriptor::kSourceLength);
+ auto start = Parameter<Number>(Descriptor::kStart);
+ auto depth = Parameter<Number>(Descriptor::kDepth);
// FlattenIntoArray might get called recursively, check stack for overflow
// manually as it has stub linkage.
@@ -1521,15 +1520,14 @@ TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSReceiver> target = CAST(Parameter(Descriptor::kTarget));
- TNode<JSReceiver> source = CAST(Parameter(Descriptor::kSource));
- TNode<Number> source_length = CAST(Parameter(Descriptor::kSourceLength));
- TNode<Number> start = CAST(Parameter(Descriptor::kStart));
- TNode<Number> depth = CAST(Parameter(Descriptor::kDepth));
- TNode<HeapObject> mapper_function =
- CAST(Parameter(Descriptor::kMapperFunction));
- TNode<Object> this_arg = CAST(Parameter(Descriptor::kThisArg));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto target = Parameter<JSReceiver>(Descriptor::kTarget);
+ auto source = Parameter<JSReceiver>(Descriptor::kSource);
+ auto source_length = Parameter<Number>(Descriptor::kSourceLength);
+ auto start = Parameter<Number>(Descriptor::kStart);
+ auto depth = Parameter<Number>(Descriptor::kDepth);
+ auto mapper_function = Parameter<HeapObject>(Descriptor::kMapperFunction);
+ auto this_arg = Parameter<Object>(Descriptor::kThisArg);
Return(FlattenIntoArray(context, target, source, source_length, start, depth,
mapper_function, this_arg));
@@ -1538,9 +1536,9 @@ TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flat
TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
const TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto context = Parameter<Context>(Descriptor::kContext);
const TNode<Object> receiver = args.GetReceiver();
const TNode<Object> depth = args.GetOptionalArgumentValue(0);
@@ -1580,9 +1578,9 @@ TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap
TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
const TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto context = Parameter<Context>(Descriptor::kContext);
const TNode<Object> receiver = args.GetReceiver();
const TNode<Object> mapper_function = args.GetOptionalArgumentValue(0);
@@ -1620,11 +1618,10 @@ TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
TF_BUILTIN(ArrayConstructor, ArrayBuiltinsAssembler) {
// This is a trampoline to ArrayConstructorImpl which just adds
// allocation_site parameter value and sets new_target if necessary.
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSFunction> function = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto function = Parameter<JSFunction>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
// If new_target is undefined, then this is the 'Call' case, so set new_target
// to function.
@@ -1785,12 +1782,11 @@ void ArrayBuiltinsAssembler::GenerateDispatchToArrayStub(
}
TF_BUILTIN(ArrayConstructorImpl, ArrayBuiltinsAssembler) {
- TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<HeapObject> maybe_allocation_site =
- CAST(Parameter(Descriptor::kAllocationSite));
+ auto target = Parameter<JSFunction>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto maybe_allocation_site =
+ Parameter<HeapObject>(Descriptor::kAllocationSite);
// Initial map for the builtin Array functions should be Map.
CSA_ASSERT(this, IsMap(CAST(LoadObjectField(
@@ -1877,12 +1873,12 @@ void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor(
ElementsKind kind, AllocationSiteOverrideMode mode) {
using Descriptor = ArrayNoArgumentConstructorDescriptor;
TNode<NativeContext> native_context = LoadObjectField<NativeContext>(
- CAST(Parameter(Descriptor::kFunction)), JSFunction::kContextOffset);
+ Parameter<HeapObject>(Descriptor::kFunction), JSFunction::kContextOffset);
bool track_allocation_site =
AllocationSite::ShouldTrack(kind) && mode != DISABLE_ALLOCATION_SITES;
base::Optional<TNode<AllocationSite>> allocation_site =
track_allocation_site
- ? CAST(Parameter(Descriptor::kAllocationSite))
+ ? Parameter<AllocationSite>(Descriptor::kAllocationSite)
: base::Optional<TNode<AllocationSite>>(base::nullopt);
TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
TNode<JSArray> array = AllocateJSArray(
@@ -1894,8 +1890,8 @@ void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor(
void ArrayBuiltinsAssembler::GenerateArraySingleArgumentConstructor(
ElementsKind kind, AllocationSiteOverrideMode mode) {
using Descriptor = ArraySingleArgumentConstructorDescriptor;
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> function = CAST(Parameter(Descriptor::kFunction));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto function = Parameter<HeapObject>(Descriptor::kFunction);
TNode<NativeContext> native_context =
CAST(LoadObjectField(function, JSFunction::kContextOffset));
TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
@@ -1907,11 +1903,9 @@ void ArrayBuiltinsAssembler::GenerateArraySingleArgumentConstructor(
: DONT_TRACK_ALLOCATION_SITE;
}
- TNode<Object> array_size =
- CAST(Parameter(Descriptor::kArraySizeSmiParameter));
+ auto array_size = Parameter<Object>(Descriptor::kArraySizeSmiParameter);
// allocation_site can be Undefined or an AllocationSite
- TNode<HeapObject> allocation_site =
- CAST(Parameter(Descriptor::kAllocationSite));
+ auto allocation_site = Parameter<HeapObject>(Descriptor::kAllocationSite);
GenerateConstructor(context, function, array_map, array_size, allocation_site,
kind, allocation_site_mode);
@@ -1934,12 +1928,11 @@ void ArrayBuiltinsAssembler::GenerateArrayNArgumentsConstructor(
}
TF_BUILTIN(ArrayNArgumentsConstructor, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSFunction> target = CAST(Parameter(Descriptor::kFunction));
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<HeapObject> maybe_allocation_site =
- CAST(Parameter(Descriptor::kAllocationSite));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto target = Parameter<JSFunction>(Descriptor::kFunction);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto maybe_allocation_site =
+ Parameter<HeapObject>(Descriptor::kAllocationSite);
GenerateArrayNArgumentsConstructor(context, target, target, argc,
maybe_allocation_site);
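
Note on the churn above: it is one mechanical pattern repeated across these builtins. TF_BUILTIN bodies stop spelling out TNode<T> with CAST(Parameter(...)) / UncheckedCast<T>(Parameter(...)) and instead use the typed Parameter<T>(...) and UncheckedParameter<T>(...) accessors, letting `auto` deduce the node type. A minimal, out-of-context sketch of the before and after, using descriptor names taken from the hunks above:

    // Old style: untyped Parameter() plus an explicit cast to the node type.
    TNode<Context> context = CAST(Parameter(Descriptor::kContext));
    TNode<Int32T> argc =
        UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));

    // New style: the accessor itself carries the type, so `auto` deduces
    // TNode<Context> / TNode<Int32T>.
    auto context = Parameter<Context>(Descriptor::kContext);
    auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);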
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 3c2fe33c5b..5467cf7c85 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -457,11 +457,11 @@ BUILTIN(ArrayPop) {
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
uint32_t len = static_cast<uint32_t>(array->length().Number());
- if (len == 0) return ReadOnlyRoots(isolate).undefined_value();
if (JSArray::HasReadOnlyLength(array)) {
return GenericArrayPop(isolate, &args);
}
+ if (len == 0) return ReadOnlyRoots(isolate).undefined_value();
Handle<Object> result;
if (IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
@@ -988,7 +988,7 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
Handle<String> string(String::cast(js_value->value()), isolate);
uint32_t length = static_cast<uint32_t>(string->length());
uint32_t i = 0;
- uint32_t limit = Min(length, range);
+ uint32_t limit = std::min(length, range);
for (; i < limit; i++) {
indices->push_back(i);
}
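
The ArrayPop hunk swaps the order of two early checks: the read-only-length test now runs before the empty-array fast return. A condensed sketch of the new order, using only names that appear in the hunk; the effect is that an empty array whose length is read-only is handed to GenericArrayPop, which performs the spec'd length write and so can throw, instead of silently returning undefined:

    if (JSArray::HasReadOnlyLength(array)) {
      return GenericArrayPop(isolate, &args);  // generic, spec-observing path
    }
    if (len == 0) return ReadOnlyRoots(isolate).undefined_value();  // fast path

The Min change in CollectElementIndices is purely mechanical, dropping V8's own helper in favor of std::min.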
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index 62d7d820c0..0f5f905186 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -139,8 +139,8 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
// * If relativeStart < 0, let first be max((len + relativeStart), 0); else
// let first be min(relativeStart, len).
double const first = (relative_start->Number() < 0)
- ? Max(len + relative_start->Number(), 0.0)
- : Min(relative_start->Number(), len);
+ ? std::max(len + relative_start->Number(), 0.0)
+ : std::min(relative_start->Number(), len);
Handle<Object> first_obj = isolate->factory()->NewNumber(first);
// * If end is undefined, let relativeEnd be len; else let relativeEnd be ?
@@ -157,11 +157,11 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
// * If relativeEnd < 0, let final be max((len + relativeEnd), 0); else let
// final be min(relativeEnd, len).
- double const final_ = (relative_end < 0) ? Max(len + relative_end, 0.0)
- : Min(relative_end, len);
+ double const final_ = (relative_end < 0) ? std::max(len + relative_end, 0.0)
+ : std::min(relative_end, len);
// * Let newLen be max(final-first, 0).
- double const new_len = Max(final_ - first, 0.0);
+ double const new_len = std::max(final_ - first, 0.0);
Handle<Object> new_len_obj = isolate->factory()->NewNumber(new_len);
// * [AB] Let ctor be ? SpeciesConstructor(O, %ArrayBuffer%).
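
The SliceHelper hunks make the same std::max/std::min substitution inside the ArrayBuffer.prototype.slice bounds computation. As a standalone illustration of that clamping arithmetic (plain doubles only, none of the surrounding Handle/factory machinery; the struct and function names here are invented for the sketch):

    #include <algorithm>

    struct SliceBounds { double first, final_, new_len; };

    // Mirrors the spec steps quoted in the comments above: clamp relativeStart
    // and relativeEnd into [0, len], then take the non-negative length.
    SliceBounds ComputeSliceBounds(double relative_start, double relative_end,
                                   double len) {
      double const first = (relative_start < 0)
                               ? std::max(len + relative_start, 0.0)
                               : std::min(relative_start, len);
      double const final_ = (relative_end < 0)
                                ? std::max(len + relative_end, 0.0)
                                : std::min(relative_end, len);
      double const new_len = std::max(final_ - first, 0.0);
      return {first, final_, new_len};
    }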
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index e84442295c..49b00caa04 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -77,9 +77,9 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
}
TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
- TNode<JSFunction> closure = CAST(Parameter(Descriptor::kClosure));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto closure = Parameter<JSFunction>(Descriptor::kClosure);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto context = Parameter<Context>(Descriptor::kContext);
// Compute the number of registers and parameters.
TNode<SharedFunctionInfo> shared = LoadObjectField<SharedFunctionInfo>(
@@ -175,11 +175,11 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
}
TF_BUILTIN(AsyncFunctionReject, AsyncFunctionBuiltinsAssembler) {
- TNode<JSAsyncFunctionObject> async_function_object =
- CAST(Parameter(Descriptor::kAsyncFunctionObject));
- TNode<Object> reason = CAST(Parameter(Descriptor::kReason));
- TNode<Oddball> can_suspend = CAST(Parameter(Descriptor::kCanSuspend));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto async_function_object =
+ Parameter<JSAsyncFunctionObject>(Descriptor::kAsyncFunctionObject);
+ auto reason = Parameter<Object>(Descriptor::kReason);
+ auto can_suspend = Parameter<Oddball>(Descriptor::kCanSuspend);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<JSPromise> promise = LoadObjectField<JSPromise>(
async_function_object, JSAsyncFunctionObject::kPromiseOffset);
@@ -200,11 +200,11 @@ TF_BUILTIN(AsyncFunctionReject, AsyncFunctionBuiltinsAssembler) {
}
TF_BUILTIN(AsyncFunctionResolve, AsyncFunctionBuiltinsAssembler) {
- TNode<JSAsyncFunctionObject> async_function_object =
- CAST(Parameter(Descriptor::kAsyncFunctionObject));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Oddball> can_suspend = CAST(Parameter(Descriptor::kCanSuspend));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto async_function_object =
+ Parameter<JSAsyncFunctionObject>(Descriptor::kAsyncFunctionObject);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto can_suspend = Parameter<Oddball>(Descriptor::kCanSuspend);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<JSPromise> promise = LoadObjectField<JSPromise>(
async_function_object, JSAsyncFunctionObject::kPromiseOffset);
@@ -224,14 +224,14 @@ TF_BUILTIN(AsyncFunctionResolve, AsyncFunctionBuiltinsAssembler) {
// the promise instead of the result of RejectPromise or ResolvePromise
// respectively from a lazy deoptimization.
TF_BUILTIN(AsyncFunctionLazyDeoptContinuation, AsyncFunctionBuiltinsAssembler) {
- TNode<JSPromise> promise = CAST(Parameter(Descriptor::kPromise));
+ auto promise = Parameter<JSPromise>(Descriptor::kPromise);
Return(promise);
}
TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 1);
- const TNode<Object> sentError = CAST(Parameter(Descriptor::kSentError));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto sentError = Parameter<Object>(Descriptor::kSentError);
+ const auto context = Parameter<Context>(Descriptor::kContext);
AsyncFunctionAwaitResumeClosure(context, sentError,
JSGeneratorObject::kThrow);
@@ -240,8 +240,8 @@ TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 1);
- const TNode<Object> sentValue = CAST(Parameter(Descriptor::kSentValue));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto sentValue = Parameter<Object>(Descriptor::kSentValue);
+ const auto context = Parameter<Context>(Descriptor::kContext);
AsyncFunctionAwaitResumeClosure(context, sentValue, JSGeneratorObject::kNext);
Return(UndefinedConstant());
@@ -258,10 +258,10 @@ TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
template <typename Descriptor>
void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
const bool is_predicted_as_caught) {
- TNode<JSAsyncFunctionObject> async_function_object =
- CAST(Parameter(Descriptor::kAsyncFunctionObject));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto async_function_object =
+ Parameter<JSAsyncFunctionObject>(Descriptor::kAsyncFunctionObject);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<JSPromise> outer_promise = LoadObjectField<JSPromise>(
async_function_object, JSAsyncFunctionObject::kPromiseOffset);
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 383289fd0f..fa05e9b32a 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -269,7 +269,7 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(
StoreObjectFieldNoWriteBarrier(function, JSFunction::kContextOffset, context);
// For the native closures that are initialized here (for `await`)
- // we know that their SharedFunctionInfo::function_data() slot
+ // we know that their SharedFunctionInfo::function_data(kAcquireLoad) slot
// contains a builtin index (as Smi), so there's no need to use
// CodeStubAssembler::GetSharedFunctionInfoCode() helper here,
// which almost doubles the size of `await` builtins (unnecessarily).
@@ -303,8 +303,8 @@ TNode<Context> AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext(
}
TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) {
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
const TNode<Object> done =
LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 2b6d720880..c847d838b6 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -232,10 +232,10 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
template <typename Descriptor>
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
- TNode<JSAsyncGeneratorObject> async_generator_object =
- CAST(Parameter(Descriptor::kAsyncGeneratorObject));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto async_generator_object =
+ Parameter<JSAsyncGeneratorObject>(Descriptor::kAsyncGeneratorObject);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<AsyncGeneratorRequest> request =
CAST(LoadFirstAsyncGeneratorRequestFromQueue(async_generator_object));
@@ -310,12 +310,12 @@ TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> generator = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kNext,
@@ -328,12 +328,12 @@ TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> generator = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kReturn,
@@ -346,12 +346,12 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> generator = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kThrow,
@@ -359,15 +359,15 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
}
TF_BUILTIN(AsyncGeneratorAwaitResolveClosure, AsyncGeneratorBuiltinsAssembler) {
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
AsyncGeneratorAwaitResumeClosure(context, value,
JSAsyncGeneratorObject::kNext);
}
TF_BUILTIN(AsyncGeneratorAwaitRejectClosure, AsyncGeneratorBuiltinsAssembler) {
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
AsyncGeneratorAwaitResumeClosure(context, value,
JSAsyncGeneratorObject::kThrow);
}
@@ -384,9 +384,9 @@ TF_BUILTIN(AsyncGeneratorAwaitCaught, AsyncGeneratorBuiltinsAssembler) {
TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
using Descriptor = AsyncGeneratorResumeNextDescriptor;
- const TNode<JSAsyncGeneratorObject> generator =
- CAST(Parameter(Descriptor::kGenerator));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto generator =
+ Parameter<JSAsyncGeneratorObject>(Descriptor::kGenerator);
+ const auto context = Parameter<Context>(Descriptor::kContext);
// The penultimate step of proposal-async-iteration/#sec-asyncgeneratorresolve
// and proposal-async-iteration/#sec-asyncgeneratorreject both recursively
@@ -475,11 +475,11 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
}
TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
- const TNode<JSAsyncGeneratorObject> generator =
- CAST(Parameter(Descriptor::kGenerator));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- const TNode<Object> done = CAST(Parameter(Descriptor::kDone));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto generator =
+ Parameter<JSAsyncGeneratorObject>(Descriptor::kGenerator);
+ const auto value = Parameter<Object>(Descriptor::kValue);
+ const auto done = Parameter<Object>(Descriptor::kDone);
+ const auto context = Parameter<Context>(Descriptor::kContext);
CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
@@ -546,10 +546,10 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) {
using Descriptor = AsyncGeneratorRejectDescriptor;
- const TNode<JSAsyncGeneratorObject> generator =
- CAST(Parameter(Descriptor::kGenerator));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto generator =
+ Parameter<JSAsyncGeneratorObject>(Descriptor::kGenerator);
+ const auto value = Parameter<Object>(Descriptor::kValue);
+ const auto context = Parameter<Context>(Descriptor::kContext);
TNode<AsyncGeneratorRequest> next =
TakeFirstAsyncGeneratorRequestFromQueue(generator);
@@ -560,11 +560,10 @@ TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) {
}
TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
- const TNode<JSGeneratorObject> generator =
- CAST(Parameter(Descriptor::kGenerator));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- const TNode<Oddball> is_caught = CAST(Parameter(Descriptor::kIsCaught));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto generator = Parameter<JSGeneratorObject>(Descriptor::kGenerator);
+ const auto value = Parameter<Object>(Descriptor::kValue);
+ const auto is_caught = Parameter<Oddball>(Descriptor::kIsCaught);
+ const auto context = Parameter<Context>(Descriptor::kContext);
const TNode<AsyncGeneratorRequest> request =
CAST(LoadFirstAsyncGeneratorRequestFromQueue(generator));
@@ -579,8 +578,8 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
}
TF_BUILTIN(AsyncGeneratorYieldResolveClosure, AsyncGeneratorBuiltinsAssembler) {
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const auto context = Parameter<Context>(Descriptor::kContext);
+ const auto value = Parameter<Object>(Descriptor::kValue);
const TNode<JSAsyncGeneratorObject> generator =
CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
@@ -611,10 +610,9 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
// (per proposal-async-iteration/#sec-asyncgeneratorresumenext step 10.b.i)
//
// In all cases, the final step is to jump back to AsyncGeneratorResumeNext.
- const TNode<JSGeneratorObject> generator =
- CAST(Parameter(Descriptor::kGenerator));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- const TNode<Oddball> is_caught = CAST(Parameter(Descriptor::kIsCaught));
+ const auto generator = Parameter<JSGeneratorObject>(Descriptor::kGenerator);
+ const auto value = Parameter<Object>(Descriptor::kValue);
+ const auto is_caught = Parameter<Oddball>(Descriptor::kIsCaught);
const TNode<AsyncGeneratorRequest> req =
CAST(LoadFirstAsyncGeneratorRequestFromQueue(generator));
@@ -635,7 +633,7 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
BIND(&perform_await);
SetGeneratorAwaiting(generator);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
const TNode<JSPromise> outer_promise =
LoadPromiseFromAsyncGeneratorRequest(req);
Await(context, generator, value, outer_promise, var_on_resolve.value(),
@@ -650,8 +648,8 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
// proposal-async-iteration/#sec-asyncgeneratoryield step 8.e
TF_BUILTIN(AsyncGeneratorReturnResolveClosure,
AsyncGeneratorBuiltinsAssembler) {
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const auto context = Parameter<Context>(Descriptor::kContext);
+ const auto value = Parameter<Object>(Descriptor::kValue);
AsyncGeneratorAwaitResumeClosure(context, value, JSGeneratorObject::kReturn);
}
@@ -660,8 +658,8 @@ TF_BUILTIN(AsyncGeneratorReturnResolveClosure,
// AsyncGeneratorResumeNext.
TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure,
AsyncGeneratorBuiltinsAssembler) {
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const auto context = Parameter<Context>(Descriptor::kContext);
+ const auto value = Parameter<Object>(Descriptor::kValue);
const TNode<JSAsyncGeneratorObject> generator =
CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
@@ -678,8 +676,8 @@ TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure,
TF_BUILTIN(AsyncGeneratorReturnClosedRejectClosure,
AsyncGeneratorBuiltinsAssembler) {
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const auto context = Parameter<Context>(Descriptor::kContext);
+ const auto value = Parameter<Object>(Descriptor::kValue);
const TNode<JSAsyncGeneratorObject> generator =
CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index 73e5605ccc..9e6223073f 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -274,12 +274,12 @@ AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
// Section #sec-%asyncfromsynciteratorprototype%.next
TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
const TNode<Object> iterator = args.GetReceiver();
const TNode<Object> value = args.GetOptionalArgumentValue(kValueOrReasonArg);
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto context = Parameter<Context>(Descriptor::kContext);
auto get_method = [=](const TNode<JSReceiver> unused) {
return LoadObjectField(CAST(iterator),
@@ -295,12 +295,12 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
AsyncFromSyncBuiltinsAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
const TNode<Object> iterator = args.GetReceiver();
const TNode<Object> value = args.GetOptionalArgumentValue(kValueOrReasonArg);
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto context = Parameter<Context>(Descriptor::kContext);
auto if_return_undefined = [=, &args](
const TNode<NativeContext> native_context,
@@ -328,12 +328,12 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow,
AsyncFromSyncBuiltinsAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
const TNode<Object> iterator = args.GetReceiver();
const TNode<Object> reason = args.GetOptionalArgumentValue(kValueOrReasonArg);
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto context = Parameter<Context>(Descriptor::kContext);
auto if_throw_undefined = [=](const TNode<NativeContext> native_context,
const TNode<JSPromise> promise,
diff --git a/deps/v8/src/builtins/builtins-bigint-gen.cc b/deps/v8/src/builtins/builtins-bigint-gen.cc
index f8fe460c45..e424c53caf 100644
--- a/deps/v8/src/builtins/builtins-bigint-gen.cc
+++ b/deps/v8/src/builtins/builtins-bigint-gen.cc
@@ -17,8 +17,8 @@ TF_BUILTIN(BigIntToI64, CodeStubAssembler) {
return;
}
- TNode<Object> value = CAST(Parameter(Descriptor::kArgument));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto value = Parameter<Object>(Descriptor::kArgument);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<BigInt> n = ToBigInt(context, value);
TVARIABLE(UintPtrT, var_low);
@@ -35,8 +35,8 @@ TF_BUILTIN(BigIntToI32Pair, CodeStubAssembler) {
return;
}
- TNode<Object> value = CAST(Parameter(Descriptor::kArgument));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto value = Parameter<Object>(Descriptor::kArgument);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<BigInt> bigint = ToBigInt(context, value);
TVARIABLE(UintPtrT, var_low);
@@ -53,8 +53,7 @@ TF_BUILTIN(I64ToBigInt, CodeStubAssembler) {
return;
}
- TNode<IntPtrT> argument =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgument));
+ auto argument = UncheckedParameter<IntPtrT>(Descriptor::kArgument);
Return(BigIntFromInt64(argument));
}
@@ -66,8 +65,8 @@ TF_BUILTIN(I32PairToBigInt, CodeStubAssembler) {
return;
}
- TNode<IntPtrT> low = UncheckedCast<IntPtrT>(Parameter(Descriptor::kLow));
- TNode<IntPtrT> high = UncheckedCast<IntPtrT>(Parameter(Descriptor::kHigh));
+ auto low = UncheckedParameter<IntPtrT>(Descriptor::kLow);
+ auto high = UncheckedParameter<IntPtrT>(Descriptor::kHigh);
Return(BigIntFromInt32Pair(low, high));
}
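
The BigInt conversion hunks also show where the two accessor flavors diverge: tagged inputs (the Object argument and the Context) go through the checked Parameter<T>(...), while the raw machine-word inputs of I64ToBigInt / I32PairToBigInt go through UncheckedParameter<IntPtrT>(...), since there is no tagged value to type-check. Condensed from the hunks above:

    auto value = Parameter<Object>(Descriptor::kArgument);       // tagged
    auto context = Parameter<Context>(Descriptor::kContext);     // tagged
    auto low = UncheckedParameter<IntPtrT>(Descriptor::kLow);    // raw word
    auto high = UncheckedParameter<IntPtrT>(Descriptor::kHigh);  // raw word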
diff --git a/deps/v8/src/builtins/bigint.tq b/deps/v8/src/builtins/builtins-bigint.tq
index 409301dcc9..067fb235de 100644
--- a/deps/v8/src/builtins/bigint.tq
+++ b/deps/v8/src/builtins/builtins-bigint.tq
@@ -4,24 +4,6 @@
#include 'src/builtins/builtins-bigint-gen.h'
-// TODO(nicohartmann): Discuss whether types used by multiple builtins should be
-// in global namespace
-extern class BigIntBase extends PrimitiveHeapObject
- generates 'TNode<BigInt>' {}
-
-type BigInt extends BigIntBase;
-
-@noVerifier
-@hasSameInstanceTypeAsParent
-@doNotGenerateCast
-extern class MutableBigInt extends BigIntBase generates 'TNode<BigInt>' {
-}
-
-Convert<BigInt, MutableBigInt>(i: MutableBigInt): BigInt {
- assert(bigint::IsCanonicalized(i));
- return %RawDownCast<BigInt>(Convert<BigIntBase>(i));
-}
-
namespace bigint {
const kPositiveSign: uint32 = 0;
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 61ae06bf9e..ffe7aa40e9 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -66,13 +66,12 @@ void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
TF_BUILTIN(Call_ReceiverIsNullOrUndefined_WithFeedback,
CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kFunction));
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto target = Parameter<Object>(Descriptor::kFunction);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
CollectCallFeedback(target, context, maybe_feedback_vector,
Unsigned(ChangeInt32ToIntPtr(slot)));
TailCallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, context, target,
@@ -81,13 +80,12 @@ TF_BUILTIN(Call_ReceiverIsNullOrUndefined_WithFeedback,
TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_WithFeedback,
CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kFunction));
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto target = Parameter<Object>(Descriptor::kFunction);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
CollectCallFeedback(target, context, maybe_feedback_vector,
Unsigned(ChangeInt32ToIntPtr(slot)));
TailCallBuiltin(Builtins::kCall_ReceiverIsNotNullOrUndefined, context, target,
@@ -95,13 +93,12 @@ TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_WithFeedback,
}
TF_BUILTIN(Call_ReceiverIsAny_WithFeedback, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kFunction));
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto target = Parameter<Object>(Descriptor::kFunction);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
CollectCallFeedback(target, context, maybe_feedback_vector,
Unsigned(ChangeInt32ToIntPtr(slot)));
TailCallBuiltin(Builtins::kCall_ReceiverIsAny, context, target, argc);
@@ -425,46 +422,44 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
}
TF_BUILTIN(CallWithArrayLike, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ auto target = Parameter<Object>(Descriptor::kTarget);
base::Optional<TNode<Object>> new_target = base::nullopt;
- TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto arguments_list = Parameter<Object>(Descriptor::kArgumentsList);
+ auto context = Parameter<Context>(Descriptor::kContext);
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
TF_BUILTIN(CallWithArrayLike_WithFeedback, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ auto target = Parameter<Object>(Descriptor::kTarget);
base::Optional<TNode<Object>> new_target = base::nullopt;
- TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto arguments_list = Parameter<Object>(Descriptor::kArgumentsList);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
CollectCallFeedback(target, context, maybe_feedback_vector,
Unsigned(ChangeInt32ToIntPtr(slot)));
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ auto target = Parameter<Object>(Descriptor::kTarget);
base::Optional<TNode<Object>> new_target = base::nullopt;
- TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
- TNode<Int32T> args_count =
- UncheckedCast<Int32T>(Parameter(Descriptor::kArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto spread = Parameter<Object>(Descriptor::kSpread);
+ auto args_count = UncheckedParameter<Int32T>(Descriptor::kArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
TF_BUILTIN(CallWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ auto target = Parameter<Object>(Descriptor::kTarget);
base::Optional<TNode<Object>> new_target = base::nullopt;
- TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
- TNode<Int32T> args_count =
- UncheckedCast<Int32T>(Parameter(Descriptor::kArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto spread = Parameter<Object>(Descriptor::kSpread);
+ auto args_count = UncheckedParameter<Int32T>(Descriptor::kArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
CollectCallFeedback(target, context, maybe_feedback_vector,
Unsigned(ChangeInt32ToIntPtr(slot)));
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
@@ -647,8 +642,7 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate(
function_template_info, FunctionTemplateInfo::kCallCodeOffset);
TNode<Foreign> foreign = LoadObjectField<Foreign>(
call_handler_info, CallHandlerInfo::kJsCallbackOffset);
- TNode<RawPtrT> callback =
- DecodeExternalPointer(LoadForeignForeignAddress(foreign));
+ TNode<RawPtrT> callback = LoadForeignForeignAddressPtr(foreign);
TNode<Object> call_data =
LoadObjectField<Object>(call_handler_info, CallHandlerInfo::kDataOffset);
TailCallStub(CodeFactory::CallApiCallback(isolate()), context, callback, argc,
@@ -656,33 +650,30 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate(
}
TF_BUILTIN(CallFunctionTemplate_CheckAccess, CallOrConstructBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<FunctionTemplateInfo> function_template_info =
- CAST(Parameter(Descriptor::kFunctionTemplateInfo));
- TNode<IntPtrT> argc =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto function_template_info = UncheckedParameter<FunctionTemplateInfo>(
+ Descriptor::kFunctionTemplateInfo);
+ auto argc = UncheckedParameter<IntPtrT>(Descriptor::kArgumentsCount);
CallFunctionTemplate(CallFunctionTemplateMode::kCheckAccess,
function_template_info, argc, context);
}
TF_BUILTIN(CallFunctionTemplate_CheckCompatibleReceiver,
CallOrConstructBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<FunctionTemplateInfo> function_template_info =
- CAST(Parameter(Descriptor::kFunctionTemplateInfo));
- TNode<IntPtrT> argc =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto function_template_info = UncheckedParameter<FunctionTemplateInfo>(
+ Descriptor::kFunctionTemplateInfo);
+ auto argc = UncheckedParameter<IntPtrT>(Descriptor::kArgumentsCount);
CallFunctionTemplate(CallFunctionTemplateMode::kCheckCompatibleReceiver,
function_template_info, argc, context);
}
TF_BUILTIN(CallFunctionTemplate_CheckAccessAndCompatibleReceiver,
CallOrConstructBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<FunctionTemplateInfo> function_template_info =
- CAST(Parameter(Descriptor::kFunctionTemplateInfo));
- TNode<IntPtrT> argc =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto function_template_info = UncheckedParameter<FunctionTemplateInfo>(
+ Descriptor::kFunctionTemplateInfo);
+ auto argc = UncheckedParameter<IntPtrT>(Descriptor::kArgumentsCount);
CallFunctionTemplate(
CallFunctionTemplateMode::kCheckAccessAndCompatibleReceiver,
function_template_info, argc, context);
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index 63e4d7a572..5b7807ed4a 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -53,22 +53,6 @@ BUILTIN(CallSitePrototypeGetColumnNumber) {
return PositiveNumberOrNull(it.Frame()->GetColumnNumber(), isolate);
}
-BUILTIN(CallSitePrototypeGetEnclosingColumnNumber) {
- HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getEnclosingColumnNumber");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return PositiveNumberOrNull(it.Frame()->GetEnclosingColumnNumber(), isolate);
-}
-
-BUILTIN(CallSitePrototypeGetEnclosingLineNumber) {
- HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getEnclosingLineNumber");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return PositiveNumberOrNull(it.Frame()->GetEnclosingLineNumber(), isolate);
-}
-
BUILTIN(CallSitePrototypeGetEvalOrigin) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "getEvalOrigin");
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index 9769d785b5..9046c7d008 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -850,20 +850,20 @@ TNode<HeapObject> CollectionsBuiltinsAssembler::AllocateTable(
}
TF_BUILTIN(MapConstructor, CollectionsBuiltinsAssembler) {
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
GenerateConstructor(kMap, isolate()->factory()->Map_string(), new_target,
argc, context);
}
TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) {
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
GenerateConstructor(kSet, isolate()->factory()->Set_string(), new_target,
argc, context);
@@ -1160,8 +1160,8 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
}
TF_BUILTIN(MapIteratorToList, CollectionsBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSMapIterator> iterator = CAST(Parameter(Descriptor::kSource));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterator = Parameter<JSMapIterator>(Descriptor::kSource);
Return(MapIteratorToList(context, iterator));
}
@@ -1247,8 +1247,8 @@ TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList(
}
TF_BUILTIN(SetOrSetIteratorToList, CollectionsBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> object = CAST(Parameter(Descriptor::kSource));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto object = Parameter<HeapObject>(Descriptor::kSource);
Return(SetOrSetIteratorToList(context, object));
}
@@ -1421,8 +1421,8 @@ void CollectionsBuiltinsAssembler::SameValueZeroHeapNumber(
}
TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) {
- TNode<HeapObject> table = CAST(Parameter(Descriptor::kTable));
- TNode<Smi> index = CAST(Parameter(Descriptor::kIndex));
+ auto table = Parameter<HeapObject>(Descriptor::kTable);
+ auto index = Parameter<Smi>(Descriptor::kIndex);
Label return_index(this), return_zero(this);
// Check if we need to update the {index}.
@@ -1561,9 +1561,9 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode<TableType> table,
}
TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.get");
@@ -1587,9 +1587,9 @@ TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.has");
@@ -1628,10 +1628,10 @@ const TNode<Object> CollectionsBuiltinsAssembler::NormalizeNumberKey(
}
TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ const auto value = Parameter<Object>(Descriptor::kValue);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.set");
@@ -1746,9 +1746,9 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry(
}
TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
"Map.prototype.delete");
@@ -1805,9 +1805,9 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.add");
@@ -1914,9 +1914,9 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry(
}
TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
"Set.prototype.delete");
@@ -1969,8 +1969,8 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(MapPrototypeEntries, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
"Map.prototype.entries");
Return(AllocateJSCollectionIterator<JSMapIterator>(
@@ -1978,8 +1978,8 @@ TF_BUILTIN(MapPrototypeEntries, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(MapPrototypeGetSize, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
"get Map.prototype.size");
const TNode<OrderedHashMap> table =
@@ -1989,9 +1989,8 @@ TF_BUILTIN(MapPrototypeGetSize, CollectionsBuiltinsAssembler) {
TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
const char* const kMethodName = "Map.prototype.forEach";
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+ const auto context = Parameter<Context>(Descriptor::kContext);
CodeStubArguments args(this, argc);
const TNode<Object> receiver = args.GetReceiver();
const TNode<Object> callback = args.GetOptionalArgumentValue(0);
@@ -2051,16 +2050,16 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(MapPrototypeKeys, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.keys");
Return(AllocateJSCollectionIterator<JSMapIterator>(
context, Context::MAP_KEY_ITERATOR_MAP_INDEX, CAST(receiver)));
}
TF_BUILTIN(MapPrototypeValues, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
"Map.prototype.values");
Return(AllocateJSCollectionIterator<JSMapIterator>(
@@ -2069,8 +2068,8 @@ TF_BUILTIN(MapPrototypeValues, CollectionsBuiltinsAssembler) {
TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
const char* const kMethodName = "Map Iterator.prototype.next";
- const TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto maybe_receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
// Ensure that {maybe_receiver} is actually a JSMapIterator.
Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred);
@@ -2145,9 +2144,9 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.has");
@@ -2206,8 +2205,8 @@ TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(SetPrototypeEntries, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
"Set.prototype.entries");
Return(AllocateJSCollectionIterator<JSSetIterator>(
@@ -2215,8 +2214,8 @@ TF_BUILTIN(SetPrototypeEntries, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(SetPrototypeGetSize, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
"get Set.prototype.size");
const TNode<OrderedHashSet> table =
@@ -2226,9 +2225,8 @@ TF_BUILTIN(SetPrototypeGetSize, CollectionsBuiltinsAssembler) {
TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
const char* const kMethodName = "Set.prototype.forEach";
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+ const auto context = Parameter<Context>(Descriptor::kContext);
CodeStubArguments args(this, argc);
const TNode<Object> receiver = args.GetReceiver();
const TNode<Object> callback = args.GetOptionalArgumentValue(0);
@@ -2281,8 +2279,8 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(SetPrototypeValues, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
"Set.prototype.values");
Return(AllocateJSCollectionIterator<JSSetIterator>(
@@ -2291,8 +2289,8 @@ TF_BUILTIN(SetPrototypeValues, CollectionsBuiltinsAssembler) {
TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
const char* const kMethodName = "Set Iterator.prototype.next";
- const TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto maybe_receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
// Ensure that {maybe_receiver} is actually a JSSetIterator.
Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred);
@@ -2404,8 +2402,8 @@ void CollectionsBuiltinsAssembler::TryLookupOrderedHashTableIndex(
}
TF_BUILTIN(FindOrderedHashMapEntry, CollectionsBuiltinsAssembler) {
- const TNode<OrderedHashMap> table = CAST(Parameter(Descriptor::kTable));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ const auto table = Parameter<OrderedHashMap>(Descriptor::kTable);
+ const auto key = Parameter<Object>(Descriptor::kKey);
TVARIABLE(IntPtrT, entry_start_position, IntPtrConstant(0));
Label entry_found(this), not_found(this);
@@ -2698,28 +2696,28 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::ValueIndexFromKeyIndex(
}
TF_BUILTIN(WeakMapConstructor, WeakCollectionsBuiltinsAssembler) {
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
GenerateConstructor(kWeakMap, isolate()->factory()->WeakMap_string(),
new_target, argc, context);
}
TF_BUILTIN(WeakSetConstructor, WeakCollectionsBuiltinsAssembler) {
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
GenerateConstructor(kWeakSet, isolate()->factory()->WeakSet_string(),
new_target, argc, context);
}
TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) {
- TNode<EphemeronHashTable> table = CAST(Parameter(Descriptor::kTable));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ auto table = Parameter<EphemeronHashTable>(Descriptor::kTable);
+ auto key = Parameter<Object>(Descriptor::kKey);
Label if_not_found(this);
@@ -2736,9 +2734,9 @@ TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) {
}
TF_BUILTIN(WeakMapGet, WeakCollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
Label return_undefined(this);
@@ -2758,9 +2756,9 @@ TF_BUILTIN(WeakMapGet, WeakCollectionsBuiltinsAssembler) {
}
TF_BUILTIN(WeakMapPrototypeHas, WeakCollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
Label return_false(this);
@@ -2782,9 +2780,9 @@ TF_BUILTIN(WeakMapPrototypeHas, WeakCollectionsBuiltinsAssembler) {
// Helper that removes the entry with a given key from the backing store
// (EphemeronHashTable) of a WeakMap or WeakSet.
TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSWeakCollection> collection = CAST(Parameter(Descriptor::kCollection));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto collection = Parameter<JSWeakCollection>(Descriptor::kCollection);
+ auto key = Parameter<Object>(Descriptor::kKey);
Label call_runtime(this), if_not_found(this);
@@ -2812,10 +2810,10 @@ TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) {
// Helper that sets the key and value to the backing store (EphemeronHashTable)
// of a WeakMap or WeakSet.
TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSWeakCollection> collection = CAST(Parameter(Descriptor::kCollection));
- TNode<JSReceiver> key = CAST(Parameter(Descriptor::kKey));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto collection = Parameter<JSWeakCollection>(Descriptor::kCollection);
+ auto key = Parameter<JSReceiver>(Descriptor::kKey);
+ auto value = Parameter<Object>(Descriptor::kValue);
CSA_ASSERT(this, IsJSReceiver(key));
@@ -2862,9 +2860,9 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
}
TF_BUILTIN(WeakMapPrototypeDelete, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kKey);
ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
"WeakMap.prototype.delete");
@@ -2873,10 +2871,10 @@ TF_BUILTIN(WeakMapPrototypeDelete, CodeStubAssembler) {
}
TF_BUILTIN(WeakMapPrototypeSet, WeakCollectionsBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto value = Parameter<Object>(Descriptor::kValue);
ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
"WeakMap.prototype.set");
@@ -2892,9 +2890,9 @@ TF_BUILTIN(WeakMapPrototypeSet, WeakCollectionsBuiltinsAssembler) {
}
TF_BUILTIN(WeakSetPrototypeAdd, WeakCollectionsBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto value = Parameter<Object>(Descriptor::kValue);
ThrowIfNotInstanceType(context, receiver, JS_WEAK_SET_TYPE,
"WeakSet.prototype.add");
@@ -2910,9 +2908,9 @@ TF_BUILTIN(WeakSetPrototypeAdd, WeakCollectionsBuiltinsAssembler) {
}
TF_BUILTIN(WeakSetPrototypeDelete, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto value = Parameter<Object>(Descriptor::kValue);
ThrowIfNotInstanceType(context, receiver, JS_WEAK_SET_TYPE,
"WeakSet.prototype.delete");
@@ -2922,9 +2920,9 @@ TF_BUILTIN(WeakSetPrototypeDelete, CodeStubAssembler) {
}
TF_BUILTIN(WeakSetPrototypeHas, WeakCollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
Label return_false(this);
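The collections hunks above, and most of the files that follow, repeat one mechanical rewrite: the untyped Parameter(Descriptor::kFoo) accessor wrapped in CAST or UncheckedCast is replaced by a typed accessor whose template argument carries the target type, so the declarations can shrink to auto. A condensed illustration of the three forms used in this patch (not part of the patch itself; it only restates lines visible in the hunks):

  // Before: untyped Parameter() plus an explicit cast.
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  TNode<Int32T> argc =
      UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));

  // After: the accessor itself is typed, so `auto` deduces TNode<T>.
  auto receiver = Parameter<Object>(Descriptor::kReceiver);  // tagged, CAST-style
  auto argc =
      UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);  // raw value

  // Parameters that are only reinterpreted keep an untyped accessor and are
  // bitcast at the use site (see the RecordWrite hunks further down):
  TNode<IntPtrT> object =
      BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));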
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index ecab531e2c..3cd4503471 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -37,14 +37,13 @@ void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
}
TF_BUILTIN(Construct_WithFeedback, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto target = Parameter<Object>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
TVARIABLE(AllocationSite, allocation_site);
Label if_construct_generic(this), if_construct_array(this);
@@ -62,22 +61,22 @@ TF_BUILTIN(Construct_WithFeedback, CallOrConstructBuiltinsAssembler) {
}
TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto target = Parameter<Object>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto arguments_list = Parameter<Object>(Descriptor::kArgumentsList);
+ auto context = Parameter<Context>(Descriptor::kContext);
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
TF_BUILTIN(ConstructWithArrayLike_WithFeedback,
CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto target = Parameter<Object>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto arguments_list = Parameter<Object>(Descriptor::kArgumentsList);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
TVARIABLE(AllocationSite, allocation_site);
Label if_construct_generic(this), if_construct_array(this);
@@ -94,25 +93,25 @@ TF_BUILTIN(ConstructWithArrayLike_WithFeedback,
}
TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
- TNode<Int32T> args_count =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto target = Parameter<Object>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto spread = Parameter<Object>(Descriptor::kSpread);
+ auto args_count =
+ UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
TF_BUILTIN(ConstructWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
- TNode<Int32T> args_count =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto target = Parameter<Object>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto spread = Parameter<Object>(Descriptor::kSpread);
+ auto args_count =
+ UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
TVARIABLE(AllocationSite, allocation_site);
Label if_construct_generic(this), if_construct_array(this);
@@ -131,11 +130,10 @@ TF_BUILTIN(ConstructWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
using Node = compiler::Node;
TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
- TNode<SharedFunctionInfo> shared_function_info =
- CAST(Parameter(Descriptor::kSharedFunctionInfo));
- TNode<FeedbackCell> feedback_cell =
- CAST(Parameter(Descriptor::kFeedbackCell));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto shared_function_info =
+ Parameter<SharedFunctionInfo>(Descriptor::kSharedFunctionInfo);
+ auto feedback_cell = Parameter<FeedbackCell>(Descriptor::kFeedbackCell);
+ auto context = Parameter<Context>(Descriptor::kContext);
IncrementCounter(isolate()->counters()->fast_new_closure_total(), 1);
@@ -219,9 +217,9 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
}
TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
- TNode<JSReceiver> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto target = Parameter<JSFunction>(Descriptor::kTarget);
+ auto new_target = Parameter<JSReceiver>(Descriptor::kNewTarget);
Label call_runtime(this);
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index cf56c5366c..35865c70cb 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -14,22 +14,22 @@ namespace internal {
// ES6 section 7.1.3 ToNumber ( argument )
TF_BUILTIN(ToNumber, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto input = Parameter<Object>(Descriptor::kArgument);
Return(ToNumber(context, input));
}
TF_BUILTIN(PlainPrimitiveToNumber, CodeStubAssembler) {
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
+ auto input = Parameter<Object>(Descriptor::kArgument);
Return(PlainPrimitiveToNumber(input));
}
// Like ToNumber, but also converts BigInts.
TF_BUILTIN(ToNumberConvertBigInt, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto input = Parameter<Object>(Descriptor::kArgument);
Return(ToNumber(context, input, BigIntHandling::kConvertToNumber));
}
@@ -38,7 +38,7 @@ TF_BUILTIN(ToNumberConvertBigInt, CodeStubAssembler) {
// Requires parameter on stack so that it can be used as a continuation from a
// LAZY deopt.
TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) {
- TNode<Object> value = CAST(Parameter(Descriptor::kArgument));
+ auto value = Parameter<Object>(Descriptor::kArgument);
Label return_true(this), return_false(this);
BranchIfToBooleanIsTrue(value, &return_true, &return_false);
@@ -52,7 +52,7 @@ TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) {
// ES6 section 12.5.5 typeof operator
TF_BUILTIN(Typeof, CodeStubAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kObject));
+ auto object = Parameter<Object>(Descriptor::kObject);
Return(Typeof(object));
}
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 1718ea97ad..3ae331f5d7 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -90,6 +90,7 @@ BUILTIN(DataViewConstructor) {
isolate, result,
JSObject::New(target, new_target, Handle<AllocationSite>::null()));
for (int i = 0; i < ArrayBufferView::kEmbedderFieldCount; ++i) {
+ // TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
Handle<JSDataView>::cast(result)->SetEmbedderField(i, Smi::zero());
}
@@ -101,6 +102,7 @@ BUILTIN(DataViewConstructor) {
// 13. Set O's [[ByteOffset]] internal slot to offset.
Handle<JSDataView>::cast(result)->set_byte_offset(view_byte_offset);
+ Handle<JSDataView>::cast(result)->AllocateExternalPointerEntries(isolate);
Handle<JSDataView>::cast(result)->set_data_pointer(
isolate,
static_cast<uint8_t*>(array_buffer->backing_store()) + view_byte_offset);
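The DataViewConstructor hunk above is one of the few non-mechanical changes in this file set: it inserts an AllocateExternalPointerEntries call before the data pointer is written. A minimal sketch of the resulting initialization order, lifted from the lines above; the local name data_view and the rationale comment are mine, the latter inferred from the v8:10391 TODO about external pointers:

  Handle<JSDataView> data_view = Handle<JSDataView>::cast(result);
  // Reserve the external-pointer entries first (presumably so the raw pointer
  // can be stored through V8's external-pointer mechanism)...
  data_view->AllocateExternalPointerEntries(isolate);
  // ...then record the backing store address, offset by the view's byte offset.
  data_view->set_data_pointer(
      isolate,
      static_cast<uint8_t*>(array_buffer->backing_store()) + view_byte_offset);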
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index 05fcc53f12..6d43013501 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -69,123 +69,123 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(
}
TF_BUILTIN(DatePrototypeGetDate, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kDay);
}
TF_BUILTIN(DatePrototypeGetDay, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kWeekday);
}
TF_BUILTIN(DatePrototypeGetFullYear, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kYear);
}
TF_BUILTIN(DatePrototypeGetHours, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kHour);
}
TF_BUILTIN(DatePrototypeGetMilliseconds, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kMillisecond);
}
TF_BUILTIN(DatePrototypeGetMinutes, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kMinute);
}
TF_BUILTIN(DatePrototypeGetMonth, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kMonth);
}
TF_BUILTIN(DatePrototypeGetSeconds, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kSecond);
}
TF_BUILTIN(DatePrototypeGetTime, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kDateValue);
}
TF_BUILTIN(DatePrototypeGetTimezoneOffset, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kTimezoneOffset);
}
TF_BUILTIN(DatePrototypeGetUTCDate, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kDayUTC);
}
TF_BUILTIN(DatePrototypeGetUTCDay, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kWeekdayUTC);
}
TF_BUILTIN(DatePrototypeGetUTCFullYear, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kYearUTC);
}
TF_BUILTIN(DatePrototypeGetUTCHours, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kHourUTC);
}
TF_BUILTIN(DatePrototypeGetUTCMilliseconds, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kMillisecondUTC);
}
TF_BUILTIN(DatePrototypeGetUTCMinutes, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kMinuteUTC);
}
TF_BUILTIN(DatePrototypeGetUTCMonth, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kMonthUTC);
}
TF_BUILTIN(DatePrototypeGetUTCSeconds, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kSecondUTC);
}
TF_BUILTIN(DatePrototypeValueOf, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kDateValue);
}
TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> hint = CAST(Parameter(Descriptor::kHint));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto hint = Parameter<Object>(Descriptor::kHint);
// Check if the {receiver} is actually a JSReceiver.
Label receiver_is_invalid(this, Label::kDeferred);
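Every Date getter in the hunk above has the same two-parameter shape and differs only in the JSDate field index it forwards to the shared helper. A sketch of that shape; GetFoo and kFoo are placeholders, not real names:

  TF_BUILTIN(DatePrototypeGetFoo, DateBuiltinsAssembler) {
    auto context = Parameter<Context>(Descriptor::kContext);
    auto receiver = Parameter<Object>(Descriptor::kReceiver);
    // Delegates to the shared helper, which loads the requested field
    // (its body is largely outside this excerpt).
    Generate_DatePrototype_GetField(context, receiver, JSDate::kFoo);
  }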
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 10bbd12f8a..a30520d150 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -138,6 +138,10 @@ namespace internal {
TFC(CompileLazyDeoptimizedCode, JSTrampoline) \
TFC(InstantiateAsmJs, JSTrampoline) \
ASM(NotifyDeoptimized, Dummy) \
+ ASM(DeoptimizationEntry_Eager, DeoptimizationEntry) \
+ ASM(DeoptimizationEntry_Soft, DeoptimizationEntry) \
+ ASM(DeoptimizationEntry_Bailout, DeoptimizationEntry) \
+ ASM(DeoptimizationEntry_Lazy, DeoptimizationEntry) \
\
/* Trampolines called when returning from a deoptimization that expects */ \
/* to continue in a JavaScript builtin to finish the functionality of a */ \
@@ -192,7 +196,6 @@ namespace internal {
TFC(PlainPrimitiveToNumber, TypeConversionNoContext) \
TFC(ToNumberConvertBigInt, TypeConversion) \
TFC(Typeof, Typeof) \
- TFC(GetSuperConstructor, Typeof) \
TFC(BigIntToI64, BigIntToI64) \
TFC(BigIntToI32Pair, BigIntToI32Pair) \
TFC(I64ToBigInt, I64ToBigInt) \
@@ -364,8 +367,6 @@ namespace internal {
\
/* CallSite */ \
CPP(CallSitePrototypeGetColumnNumber) \
- CPP(CallSitePrototypeGetEnclosingColumnNumber) \
- CPP(CallSitePrototypeGetEnclosingLineNumber) \
CPP(CallSitePrototypeGetEvalOrigin) \
CPP(CallSitePrototypeGetFileName) \
CPP(CallSitePrototypeGetFunction) \
@@ -762,9 +763,6 @@ namespace internal {
TFJ(StringPrototypeSearch, 1, kReceiver, kRegexp) \
/* ES6 #sec-string.prototype.split */ \
TFJ(StringPrototypeSplit, kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrim, kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrimEnd, kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrimStart, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.raw */ \
CPP(StringRaw) \
\
@@ -813,6 +811,7 @@ namespace internal {
TFS(WasmAllocateArrayWithRtt, kMap, kLength, kElementSize) \
TFC(WasmI32AtomicWait32, WasmI32AtomicWait32) \
TFC(WasmI64AtomicWait32, WasmI64AtomicWait32) \
+ TFS(WasmAllocatePair, kValue1, kValue2) \
\
/* WeakMap */ \
TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \
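The builtins-definitions.h hunks edit V8's central builtin list macro; each entry's prefix says how the builtin is implemented. As I read the conventions in this file, the forms touched above are (examples copied from the hunks, glosses mine, trailing line-continuation backslashes of the macro list omitted):

  ASM(DeoptimizationEntry_Eager, DeoptimizationEntry)     // hand-written assembly + descriptor
  TFC(PlainPrimitiveToNumber, TypeConversionNoContext)    // CSA builtin, custom call descriptor
  TFS(WasmAllocatePair, kValue1, kValue2)                 // CSA builtin, stub linkage, named params
  TFJ(StringPrototypeSplit, kDontAdaptArgumentsSentinel)  // CSA builtin, JS calling convention
  CPP(StringRaw)                                          // builtin implemented in C++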
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index b3fbc4fd94..23ee4da8c1 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -80,6 +80,14 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
}
}
+ bool is_code_like = true;
+ for (int i = 0; i < argc; ++i) {
+ if (!args.at(i + 1)->IsCodeLike(isolate)) {
+ is_code_like = false;
+ break;
+ }
+ }
+
// Compile the string in the constructor and not a helper so that errors to
// come from here.
Handle<JSFunction> function;
@@ -88,7 +96,7 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
isolate, function,
Compiler::GetFunctionFromString(
handle(target->native_context(), isolate), source,
- ONLY_SINGLE_FUNCTION_LITERAL, parameters_end_pos),
+ ONLY_SINGLE_FUNCTION_LITERAL, parameters_end_pos, is_code_like),
Object);
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
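The builtins-function.cc hunk is another semantic change: before compiling the source assembled by the dynamic Function constructor, it now checks whether every argument is "code like" and forwards that flag as a new trailing argument to Compiler::GetFunctionFromString. Condensed from the lines above; the comment about the +1 offset is my reading of the arguments indexing:

  bool is_code_like = true;
  for (int i = 0; i < argc; ++i) {
    // i + 1 because, as I understand it, slot 0 of the arguments holds the receiver.
    if (!args.at(i + 1)->IsCodeLike(isolate)) {
      is_code_like = false;
      break;
    }
  }
  // ...later threaded through as the new last parameter:
  // Compiler::GetFunctionFromString(native_context, source,
  //                                 ONLY_SINGLE_FUNCTION_LITERAL,
  //                                 parameters_end_pos, is_code_like);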
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index 8693cd61f4..d93ab2e103 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -137,13 +137,12 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
TF_BUILTIN(AsyncModuleEvaluate, GeneratorBuiltinsAssembler) {
const int kValueArg = 0;
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- TNode<Context> context = Cast(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
// AsyncModules act like JSAsyncFunctions. Thus we check here
// that the {receiver} is a JSAsyncFunction.
@@ -159,13 +158,12 @@ TF_BUILTIN(AsyncModuleEvaluate, GeneratorBuiltinsAssembler) {
TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
const int kValueArg = 0;
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
GeneratorPrototypeResume(&args, receiver, value, context,
JSGeneratorObject::kNext,
@@ -176,13 +174,12 @@ TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
const int kValueArg = 0;
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
GeneratorPrototypeResume(&args, receiver, value, context,
JSGeneratorObject::kReturn,
@@ -193,13 +190,12 @@ TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) {
const int kExceptionArg = 0;
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> exception = args.GetOptionalArgumentValue(kExceptionArg);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
GeneratorPrototypeResume(&args, receiver, exception, context,
JSGeneratorObject::kThrow,
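The generator builtins above (and AsyncModuleEvaluate) all open with the same prologue for builtins that do not adapt their argument count: the raw argument count arrives through an unchecked Int32 parameter and is wrapped in CodeStubArguments, from which the receiver and the optional operand are read. Condensed from the hunks above:

  auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
  CodeStubArguments args(this, argc);            // view over the incoming JS arguments
  TNode<Object> receiver = args.GetReceiver();
  TNode<Object> value = args.GetOptionalArgumentValue(0);  // undefined when absent
  auto context = Parameter<Context>(Descriptor::kContext);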
diff --git a/deps/v8/src/builtins/builtins-global-gen.cc b/deps/v8/src/builtins/builtins-global-gen.cc
index 43d30cc6b1..d33fc3c37b 100644
--- a/deps/v8/src/builtins/builtins-global-gen.cc
+++ b/deps/v8/src/builtins/builtins-global-gen.cc
@@ -11,14 +11,14 @@ namespace internal {
// ES #sec-isfinite-number
TF_BUILTIN(GlobalIsFinite, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
Label return_true(this), return_false(this);
// We might need to loop once for ToNumber conversion.
TVARIABLE(Object, var_num);
Label loop(this, &var_num);
- var_num = CAST(Parameter(Descriptor::kNumber));
+ var_num = Parameter<Object>(Descriptor::kNumber);
Goto(&loop);
BIND(&loop);
{
@@ -60,14 +60,14 @@ TF_BUILTIN(GlobalIsFinite, CodeStubAssembler) {
// ES6 #sec-isnan-number
TF_BUILTIN(GlobalIsNaN, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
Label return_true(this), return_false(this);
// We might need to loop once for ToNumber conversion.
TVARIABLE(Object, var_num);
Label loop(this, &var_num);
- var_num = CAST(Parameter(Descriptor::kNumber));
+ var_num = Parameter<Object>(Descriptor::kNumber);
Goto(&loop);
BIND(&loop);
{
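GlobalIsFinite and GlobalIsNaN both build a small retry loop so that a single ToNumber conversion can feed its result back into the same check, which is what the "might need to loop once" comment refers to. The shape of that loop, with the body only indicated because it falls outside this excerpt:

  TVARIABLE(Object, var_num);
  Label loop(this, &var_num);
  var_num = Parameter<Object>(Descriptor::kNumber);
  Goto(&loop);
  BIND(&loop);
  {
    // Inspect var_num here; if it is not yet a number, assign the result of a
    // ToNumber conversion back into var_num and Goto(&loop) again (body not
    // shown in this diff).
  }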
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index 8075a597e8..3cbd626b8e 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -43,12 +43,12 @@ class HandlerBuiltinsAssembler : public CodeStubAssembler {
};
TF_BUILTIN(LoadIC_StringLength, CodeStubAssembler) {
- TNode<String> string = CAST(Parameter(Descriptor::kReceiver));
+ auto string = Parameter<String>(Descriptor::kReceiver);
Return(LoadStringLengthAsSmi(string));
}
TF_BUILTIN(LoadIC_StringWrapperLength, CodeStubAssembler) {
- TNode<JSPrimitiveWrapper> value = CAST(Parameter(Descriptor::kReceiver));
+ auto value = Parameter<JSPrimitiveWrapper>(Descriptor::kReceiver);
TNode<String> string = CAST(LoadJSPrimitiveWrapperValue(value));
Return(LoadStringLengthAsSmi(string));
}
@@ -130,13 +130,13 @@ void HandlerBuiltinsAssembler::DispatchForElementsKindTransition(
void HandlerBuiltinsAssembler::Generate_ElementsTransitionAndStore(
KeyedAccessStoreMode store_mode) {
using Descriptor = StoreTransitionDescriptor;
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Map> map = CAST(Parameter(Descriptor::kMap));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto map = Parameter<Map>(Descriptor::kMap);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<FeedbackVector>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Comment("ElementsTransitionAndStore: store_mode=", store_mode);
@@ -262,12 +262,12 @@ void HandlerBuiltinsAssembler::DispatchByElementsKind(
void HandlerBuiltinsAssembler::Generate_StoreFastElementIC(
KeyedAccessStoreMode store_mode) {
using Descriptor = StoreWithVectorDescriptor;
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Comment("StoreFastElementStub: store_mode=", store_mode);
@@ -312,11 +312,11 @@ TF_BUILTIN(StoreFastElementIC_NoTransitionHandleCOW, HandlerBuiltinsAssembler) {
}
TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) {
- TNode<JSFunction> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Name> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSFunction>(Descriptor::kReceiver);
+ auto name = Parameter<Name>(Descriptor::kName);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<FeedbackVector>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label miss(this, Label::kDeferred);
Return(LoadJSFunctionPrototype(receiver, &miss));
@@ -326,12 +326,12 @@ TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) {
}
TF_BUILTIN(StoreGlobalIC_Slow, CodeStubAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Name> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Name>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<FeedbackVector>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
@@ -340,11 +340,11 @@ TF_BUILTIN(StoreGlobalIC_Slow, CodeStubAssembler) {
}
TF_BUILTIN(KeyedLoadIC_SloppyArguments, HandlerBuiltinsAssembler) {
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label miss(this);
@@ -361,12 +361,12 @@ TF_BUILTIN(KeyedLoadIC_SloppyArguments, HandlerBuiltinsAssembler) {
void HandlerBuiltinsAssembler::Generate_KeyedStoreIC_SloppyArguments() {
using Descriptor = StoreWithVectorDescriptor;
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label miss(this);
@@ -398,11 +398,11 @@ TF_BUILTIN(KeyedStoreIC_SloppyArguments_NoTransitionHandleCOW,
}
TF_BUILTIN(LoadIndexedInterceptorIC, CodeStubAssembler) {
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label if_keyispositivesmi(this), if_keyisinvalid(this);
Branch(TaggedIsPositiveSmi(key), &if_keyispositivesmi, &if_keyisinvalid);
@@ -415,11 +415,11 @@ TF_BUILTIN(LoadIndexedInterceptorIC, CodeStubAssembler) {
}
TF_BUILTIN(KeyedHasIC_SloppyArguments, HandlerBuiltinsAssembler) {
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label miss(this);
@@ -435,11 +435,11 @@ TF_BUILTIN(KeyedHasIC_SloppyArguments, HandlerBuiltinsAssembler) {
}
TF_BUILTIN(HasIndexedInterceptorIC, CodeStubAssembler) {
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label if_keyispositivesmi(this), if_keyisinvalid(this);
Branch(TaggedIsPositiveSmi(key), &if_keyispositivesmi, &if_keyisinvalid);
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 13698758e6..4108d897f6 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -30,7 +30,7 @@ void Builtins::Generate_StackCheck(MacroAssembler* masm) {
// TurboFan support builtins.
TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
- TNode<JSObject> js_object = CAST(Parameter(Descriptor::kObject));
+ auto js_object = Parameter<JSObject>(Descriptor::kObject);
// Load the {object}s elements.
TNode<FixedArrayBase> source =
@@ -42,8 +42,8 @@ TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
}
TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
- TNode<JSObject> object = CAST(Parameter(Descriptor::kObject));
- TNode<Smi> key = CAST(Parameter(Descriptor::kKey));
+ auto object = Parameter<JSObject>(Descriptor::kObject);
+ auto key = Parameter<Smi>(Descriptor::kKey);
Label runtime(this, Label::kDeferred);
TNode<FixedArrayBase> elements = LoadElements(object);
@@ -57,8 +57,8 @@ TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
}
TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
- TNode<JSObject> object = CAST(Parameter(Descriptor::kObject));
- TNode<Smi> key = CAST(Parameter(Descriptor::kKey));
+ auto object = Parameter<JSObject>(Descriptor::kObject);
+ auto key = Parameter<Smi>(Descriptor::kKey);
Label runtime(this, Label::kDeferred);
TNode<FixedArrayBase> elements = LoadElements(object);
@@ -72,17 +72,17 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
}
TF_BUILTIN(ReturnReceiver, CodeStubAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Return(receiver);
}
TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
Label tailcall_to_shared(this);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- TNode<Int32T> arg_count =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- TNode<JSFunction> function = CAST(Parameter(Descriptor::kJSTarget));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
+ auto arg_count =
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+ auto function = Parameter<JSFunction>(Descriptor::kJSTarget);
// Check break-at-entry flag on the debug info.
TNode<SharedFunctionInfo> shared =
@@ -311,8 +311,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
Label incremental_wb(this);
Label exit(this);
- TNode<Smi> remembered_set =
- UncheckedCast<Smi>(Parameter(Descriptor::kRememberedSet));
+ auto remembered_set = UncheckedParameter<Smi>(Descriptor::kRememberedSet);
Branch(ShouldEmitRememberSet(remembered_set), &generational_wb,
&incremental_wb);
@@ -327,7 +326,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
// `kPointersToHereAreInterestingMask` in
// `src/compiler/<arch>/code-generator-<arch>.cc` before calling this stub,
// which serves as the cross generation checking.
- TNode<IntPtrT> slot = UncheckedCast<IntPtrT>(Parameter(Descriptor::kSlot));
+ auto slot = UncheckedParameter<IntPtrT>(Descriptor::kSlot);
Branch(IsMarking(), &test_old_to_young_flags, &store_buffer_exit);
BIND(&test_old_to_young_flags);
@@ -343,7 +342,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
GotoIfNot(value_is_young, &incremental_wb);
TNode<IntPtrT> object =
- BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
TNode<BoolT> object_is_young =
IsPageFlagSet(object, MemoryChunk::kIsInYoungGenerationMask);
Branch(object_is_young, &incremental_wb, &store_buffer_incremental_wb);
@@ -351,17 +350,17 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
BIND(&store_buffer_exit);
{
- TNode<Smi> fp_mode = UncheckedCast<Smi>(Parameter(Descriptor::kFPMode));
+ auto fp_mode = UncheckedParameter<Smi>(Descriptor::kFPMode);
TNode<IntPtrT> object =
- BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
InsertIntoRememberedSetAndGoto(object, slot, fp_mode, &exit);
}
BIND(&store_buffer_incremental_wb);
{
- TNode<Smi> fp_mode = UncheckedCast<Smi>(Parameter(Descriptor::kFPMode));
+ auto fp_mode = UncheckedParameter<Smi>(Descriptor::kFPMode);
TNode<IntPtrT> object =
- BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
InsertIntoRememberedSetAndGoto(object, slot, fp_mode, &incremental_wb);
}
}
@@ -370,7 +369,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
{
Label call_incremental_wb(this);
- TNode<IntPtrT> slot = UncheckedCast<IntPtrT>(Parameter(Descriptor::kSlot));
+ auto slot = UncheckedParameter<IntPtrT>(Descriptor::kSlot);
TNode<IntPtrT> value =
BitcastTaggedToWord(Load(MachineType::TaggedPointer(), slot));
@@ -383,7 +382,8 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
GotoIfNot(IsPageFlagSet(value, MemoryChunk::kEvacuationCandidateMask),
&exit);
- TNode<IntPtrT> object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ TNode<IntPtrT> object =
+ BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
Branch(
IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
&exit, &call_incremental_wb);
@@ -392,9 +392,9 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
{
TNode<ExternalReference> function = ExternalConstant(
ExternalReference::write_barrier_marking_from_code_function());
- TNode<Smi> fp_mode = UncheckedCast<Smi>(Parameter(Descriptor::kFPMode));
+ auto fp_mode = UncheckedParameter<Smi>(Descriptor::kFPMode);
TNode<IntPtrT> object =
- BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
CallCFunction2WithCallerSavedRegistersMode<Int32T, IntPtrT, IntPtrT>(
function, object, slot, fp_mode, &exit);
}
@@ -412,10 +412,10 @@ TF_BUILTIN(EphemeronKeyBarrier, RecordWriteCodeStubAssembler) {
ExternalReference::ephemeron_key_write_barrier_function());
TNode<ExternalReference> isolate_constant =
ExternalConstant(ExternalReference::isolate_address(isolate()));
- TNode<IntPtrT> address =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kSlotAddress));
- TNode<IntPtrT> object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
- TNode<Smi> fp_mode = UncheckedCast<Smi>(Parameter(Descriptor::kFPMode));
+ auto address = UncheckedParameter<IntPtrT>(Descriptor::kSlotAddress);
+ TNode<IntPtrT> object =
+ BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
+ TNode<Smi> fp_mode = UncheckedParameter<Smi>(Descriptor::kFPMode);
CallCFunction3WithCallerSavedRegistersMode<Int32T, IntPtrT, IntPtrT,
ExternalReference>(
function, object, address, isolate_constant, fp_mode, &exit);
@@ -476,10 +476,10 @@ class DeletePropertyBaseAssembler : public AccessorAssembler {
};
TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kObject));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Smi> language_mode = CAST(Parameter(Descriptor::kLanguageMode));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kObject);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto language_mode = Parameter<Smi>(Descriptor::kLanguageMode);
+ auto context = Parameter<Context>(Descriptor::kContext);
TVARIABLE(IntPtrT, var_index);
TVARIABLE(Name, var_unique);
@@ -641,9 +641,9 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler {
// ES #sec-copydataproperties
TF_BUILTIN(CopyDataProperties, SetOrCopyDataPropertiesAssembler) {
- TNode<JSObject> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> source = CAST(Parameter(Descriptor::kSource));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto target = Parameter<JSObject>(Descriptor::kTarget);
+ auto source = Parameter<Object>(Descriptor::kSource);
+ auto context = Parameter<Context>(Descriptor::kContext);
CSA_ASSERT(this, TaggedNotEqual(target, source));
@@ -655,9 +655,9 @@ TF_BUILTIN(CopyDataProperties, SetOrCopyDataPropertiesAssembler) {
}
TF_BUILTIN(SetDataProperties, SetOrCopyDataPropertiesAssembler) {
- TNode<JSReceiver> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> source = CAST(Parameter(Descriptor::kSource));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto target = Parameter<JSReceiver>(Descriptor::kTarget);
+ auto source = Parameter<Object>(Descriptor::kSource);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label if_runtime(this, Label::kDeferred);
Return(SetOrCopyDataProperties(context, target, source, &if_runtime, true));
@@ -667,8 +667,8 @@ TF_BUILTIN(SetDataProperties, SetOrCopyDataPropertiesAssembler) {
}
TF_BUILTIN(ForInEnumerate, CodeStubAssembler) {
- TNode<JSReceiver> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSReceiver>(Descriptor::kReceiver);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label if_empty(this), if_runtime(this, Label::kDeferred);
TNode<Map> receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime);
@@ -682,9 +682,9 @@ TF_BUILTIN(ForInEnumerate, CodeStubAssembler) {
}
TF_BUILTIN(ForInFilter, CodeStubAssembler) {
- TNode<String> key = CAST(Parameter(Descriptor::kKey));
- TNode<HeapObject> object = CAST(Parameter(Descriptor::kObject));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto key = Parameter<String>(Descriptor::kKey);
+ auto object = Parameter<HeapObject>(Descriptor::kObject);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label if_true(this), if_false(this);
TNode<Oddball> result = HasProperty(context, object, key, kForInHasProperty);
@@ -698,8 +698,8 @@ TF_BUILTIN(ForInFilter, CodeStubAssembler) {
}
TF_BUILTIN(SameValue, CodeStubAssembler) {
- TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
+ auto lhs = Parameter<Object>(Descriptor::kLeft);
+ auto rhs = Parameter<Object>(Descriptor::kRight);
Label if_true(this), if_false(this);
BranchIfSameValue(lhs, rhs, &if_true, &if_false);
@@ -712,8 +712,8 @@ TF_BUILTIN(SameValue, CodeStubAssembler) {
}
TF_BUILTIN(SameValueNumbersOnly, CodeStubAssembler) {
- TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
+ auto lhs = Parameter<Object>(Descriptor::kLeft);
+ auto rhs = Parameter<Object>(Descriptor::kRight);
Label if_true(this), if_false(this);
BranchIfSameValue(lhs, rhs, &if_true, &if_false, SameValueMode::kNumbersOnly);
@@ -726,10 +726,9 @@ TF_BUILTIN(SameValueNumbersOnly, CodeStubAssembler) {
}
TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
- TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<WordT> c_function =
- UncheckedCast<WordT>(Parameter(Descriptor::kCFunction));
+ auto target = Parameter<JSFunction>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto c_function = UncheckedParameter<WordT>(Descriptor::kCFunction);
// The logic contained here is mirrored for TurboFan inlining in
// JSTypedLowering::ReduceJSCall{Function,Construct}. Keep these in sync.
@@ -740,8 +739,8 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
// ordinary functions).
TNode<Context> context = LoadJSFunctionContext(target);
- TNode<Int32T> actual_argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ auto actual_argc =
+ UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
TVARIABLE(Int32T, pushed_argc, actual_argc);
@@ -789,8 +788,7 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
}
TF_BUILTIN(AllocateInYoungGeneration, CodeStubAssembler) {
- TNode<IntPtrT> requested_size =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ auto requested_size = UncheckedParameter<IntPtrT>(Descriptor::kRequestedSize);
CSA_CHECK(this, IsValidPositiveSmi(requested_size));
TNode<Smi> allocation_flags =
@@ -801,8 +799,7 @@ TF_BUILTIN(AllocateInYoungGeneration, CodeStubAssembler) {
}
TF_BUILTIN(AllocateRegularInYoungGeneration, CodeStubAssembler) {
- TNode<IntPtrT> requested_size =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ auto requested_size = UncheckedParameter<IntPtrT>(Descriptor::kRequestedSize);
CSA_CHECK(this, IsValidPositiveSmi(requested_size));
TNode<Smi> allocation_flags =
@@ -813,8 +810,7 @@ TF_BUILTIN(AllocateRegularInYoungGeneration, CodeStubAssembler) {
}
TF_BUILTIN(AllocateInOldGeneration, CodeStubAssembler) {
- TNode<IntPtrT> requested_size =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ auto requested_size = UncheckedParameter<IntPtrT>(Descriptor::kRequestedSize);
CSA_CHECK(this, IsValidPositiveSmi(requested_size));
TNode<Smi> runtime_flags =
@@ -825,8 +821,7 @@ TF_BUILTIN(AllocateInOldGeneration, CodeStubAssembler) {
}
TF_BUILTIN(AllocateRegularInOldGeneration, CodeStubAssembler) {
- TNode<IntPtrT> requested_size =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ auto requested_size = UncheckedParameter<IntPtrT>(Descriptor::kRequestedSize);
CSA_CHECK(this, IsValidPositiveSmi(requested_size));
TNode<Smi> runtime_flags =
@@ -837,12 +832,12 @@ TF_BUILTIN(AllocateRegularInOldGeneration, CodeStubAssembler) {
}
TF_BUILTIN(Abort, CodeStubAssembler) {
- TNode<Smi> message_id = CAST(Parameter(Descriptor::kMessageOrMessageId));
+ auto message_id = Parameter<Smi>(Descriptor::kMessageOrMessageId);
TailCallRuntime(Runtime::kAbort, NoContextConstant(), message_id);
}
TF_BUILTIN(AbortCSAAssert, CodeStubAssembler) {
- TNode<String> message = CAST(Parameter(Descriptor::kMessageOrMessageId));
+ auto message = Parameter<String>(Descriptor::kMessageOrMessageId);
TailCallRuntime(Runtime::kAbortCSAAssert, NoContextConstant(), message);
}
@@ -912,9 +907,9 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
// ES6 [[Get]] operation.
TF_BUILTIN(GetProperty, CodeStubAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kObject));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto object = Parameter<Object>(Descriptor::kObject);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto context = Parameter<Context>(Descriptor::kContext);
// TODO(duongn): consider tailcalling to GetPropertyWithReceiver(object,
// object, key, OnNonExistent::kReturnUndefined).
Label if_notfound(this), if_proxy(this, Label::kDeferred),
@@ -967,11 +962,11 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) {
// ES6 [[Get]] operation with Receiver.
TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kObject));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> on_non_existent = CAST(Parameter(Descriptor::kOnNonExistent));
+ auto object = Parameter<Object>(Descriptor::kObject);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto on_non_existent = Parameter<Object>(Descriptor::kOnNonExistent);
Label if_notfound(this), if_proxy(this, Label::kDeferred),
if_slow(this, Label::kDeferred);
@@ -1035,10 +1030,10 @@ TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) {
// ES6 [[Set]] operation.
TF_BUILTIN(SetProperty, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto value = Parameter<Object>(Descriptor::kValue);
KeyedStoreGenericGenerator::SetProperty(state(), context, receiver, key,
value, LanguageMode::kStrict);
@@ -1049,10 +1044,10 @@ TF_BUILTIN(SetProperty, CodeStubAssembler) {
// any operation here should be unobservable until after the object has been
// returned.
TF_BUILTIN(SetPropertyInLiteral, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto value = Parameter<Object>(Descriptor::kValue);
KeyedStoreGenericGenerator::SetPropertyInLiteral(state(), context, receiver,
key, value);
@@ -1060,11 +1055,11 @@ TF_BUILTIN(SetPropertyInLiteral, CodeStubAssembler) {
TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
Label tailcall_to_function(this);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Int32T> arg_count =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<JSFunction> function = CAST(Parameter(Descriptor::kTarget));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto arg_count =
+ UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto function = Parameter<JSFunction>(Descriptor::kTarget);
// Retrieve arguments from caller (stdlib, foreign, heap).
CodeStubArguments args(this, arg_count);
@@ -1077,6 +1072,22 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
TNode<Object> maybe_result_or_smi_zero = CallRuntime(
Runtime::kInstantiateAsmJs, context, function, stdlib, foreign, heap);
GotoIf(TaggedIsSmi(maybe_result_or_smi_zero), &tailcall_to_function);
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ TNode<SharedFunctionInfo> shared = LoadJSFunctionSharedFunctionInfo(function);
+ TNode<Int32T> parameter_count =
+ UncheckedCast<Int32T>(LoadSharedFunctionInfoFormalParameterCount(shared));
+ // This builtin intercepts a call to {function}, where the number of arguments
+ // pushed is the maximum of actual arguments count and formal parameters
+ // count.
+ Label argc_lt_param_count(this), argc_ge_param_count(this);
+ Branch(Int32LessThan(arg_count, parameter_count), &argc_lt_param_count,
+ &argc_ge_param_count);
+ BIND(&argc_lt_param_count);
+ PopAndReturn(Int32Add(parameter_count, Int32Constant(1)),
+ maybe_result_or_smi_zero);
+ BIND(&argc_ge_param_count);
+#endif
args.PopAndReturn(maybe_result_or_smi_zero);
BIND(&tailcall_to_function);
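
The recurring change in the hunks above and below is a mechanical migration from TNode<T> x = CAST(Parameter(Descriptor::kX)) to auto x = Parameter<T>(Descriptor::kX), and likewise from UncheckedCast<T>(Parameter(...)) to UncheckedParameter<T>(...), so the cast happens once inside a typed accessor instead of being repeated at every call site. As a rough standalone sketch of the idea, using toy Node/TNode/Parameter stand-ins rather than V8's actual CodeStubAssembler API:

// Standalone sketch of a typed-parameter helper. Node, TNode, Parameter and
// the kKind tags below are stand-ins, not V8's real CodeStubAssembler types.
#include <cassert>
#include <cstdio>

struct Node { int kind; };                 // untyped IR node

template <typename T>
struct TNode { Node* node; };              // node carrying a static type

struct Object  { static constexpr int kKind = 1; };
struct Context { static constexpr int kKind = 2; };

// Untyped lookup by parameter index (what CAST(Parameter(...)) used to wrap).
inline Node* RawParameter(Node* params, int index) { return &params[index]; }

// Typed accessor: performs the debug-checked cast once, at the lookup site.
template <typename T>
TNode<T> Parameter(Node* params, int index) {
  Node* n = RawParameter(params, index);
  assert(n->kind == T::kKind);             // type check, analogous to CAST
  return TNode<T>{n};
}

int main() {
  Node params[] = {{Object::kKind}, {Context::kKind}};
  auto target  = Parameter<Object>(params, 0);   // new style: type named once
  auto context = Parameter<Context>(params, 1);
  std::printf("kinds: %d %d\n", target.node->kind, context.node->kind);
}
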
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 51546f98e8..42ccbebcbc 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -40,7 +40,7 @@ class IntlBuiltinsAssembler : public CodeStubAssembler {
};
TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
- const TNode<String> string = CAST(Parameter(Descriptor::kString));
+ const auto string = Parameter<String>(Descriptor::kString);
Label call_c(this), return_string(this), runtime(this, Label::kDeferred);
@@ -136,8 +136,8 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
}
TF_BUILTIN(StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) {
- TNode<Object> maybe_string = CAST(Parameter(Descriptor::kReceiver));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto maybe_string = Parameter<Object>(Descriptor::kReceiver);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<String> string =
ToThisString(context, maybe_string, "String.prototype.toLowerCase");
@@ -183,15 +183,15 @@ TNode<JSArray> IntlBuiltinsAssembler::AllocateEmptyJSArray(
TF_BUILTIN(ListFormatPrototypeFormat, IntlBuiltinsAssembler) {
ListFormatCommon(
- CAST(Parameter(Descriptor::kContext)),
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)),
+ Parameter<Context>(Descriptor::kContext),
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount),
Runtime::kFormatList, "Intl.ListFormat.prototype.format");
}
TF_BUILTIN(ListFormatPrototypeFormatToParts, IntlBuiltinsAssembler) {
ListFormatCommon(
- CAST(Parameter(Descriptor::kContext)),
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)),
+ Parameter<Context>(Descriptor::kContext),
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount),
Runtime::kFormatListToParts, "Intl.ListFormat.prototype.formatToParts");
}
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 9f3ec5c323..8cf52e5368 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -181,25 +181,25 @@ void IteratorBuiltinsAssembler::FillFixedArrayFromIterable(
}
TF_BUILTIN(IterableToList, IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
- TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
+ auto iterator_fn = Parameter<Object>(Descriptor::kIteratorFn);
Return(IterableToList(context, iterable, iterator_fn));
}
TF_BUILTIN(IterableToFixedArray, IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
- TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
+ auto iterator_fn = Parameter<Object>(Descriptor::kIteratorFn);
Return(IterableToFixedArray(context, iterable, iterator_fn));
}
TF_BUILTIN(IterableToFixedArrayForWasm, IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
- TNode<Smi> expected_length = CAST(Parameter(Descriptor::kExpectedLength));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
+ auto expected_length = Parameter<Smi>(Descriptor::kExpectedLength);
TNode<Object> iterator_fn = GetIteratorMethod(context, iterable);
GrowableFixedArray values(state());
@@ -280,8 +280,8 @@ TNode<JSArray> IteratorBuiltinsAssembler::StringListFromIterable(
}
TF_BUILTIN(StringListFromIterable, IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
Return(StringListFromIterable(context, iterable));
}
@@ -296,9 +296,9 @@ TF_BUILTIN(StringListFromIterable, IteratorBuiltinsAssembler) {
// prototype has no elements). To maintain the correct behavior for holey
// arrays, use the builtins IterableToList or IterableToListWithSymbolLookup.
TF_BUILTIN(IterableToListMayPreserveHoles, IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
- TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
+ auto iterator_fn = Parameter<Object>(Descriptor::kIteratorFn);
Label slow_path(this);
@@ -389,8 +389,8 @@ TNode<JSArray> IteratorBuiltinsAssembler::FastIterableToList(
// iterator is not partially consumed. To be spec-compliant, after spreading
// the iterator is set to be exhausted.
TF_BUILTIN(IterableToListWithSymbolLookup, IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
Label slow_path(this);
@@ -409,13 +409,13 @@ TF_BUILTIN(IterableToListWithSymbolLookup, IteratorBuiltinsAssembler) {
TF_BUILTIN(GetIteratorWithFeedbackLazyDeoptContinuation,
IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
// TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
- TNode<Smi> call_slot_smi = CAST(Parameter(Descriptor::kCallSlot));
+ auto call_slot_smi = Parameter<Smi>(Descriptor::kCallSlot);
TNode<TaggedIndex> call_slot = SmiToTaggedIndex(call_slot_smi);
- TNode<FeedbackVector> feedback = CAST(Parameter(Descriptor::kFeedback));
- TNode<Object> iterator_method = CAST(Parameter(Descriptor::kResult));
+ auto feedback = Parameter<FeedbackVector>(Descriptor::kFeedback);
+ auto iterator_method = Parameter<Object>(Descriptor::kResult);
TNode<Object> result =
CallBuiltin(Builtins::kCallIteratorWithFeedback, context, receiver,
@@ -427,8 +427,8 @@ TF_BUILTIN(GetIteratorWithFeedbackLazyDeoptContinuation,
// fast path for anything.
TF_BUILTIN(IterableToFixedArrayWithSymbolLookupSlow,
IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
TNode<Object> iterator_fn = GetIteratorMethod(context, iterable);
TailCallBuiltin(Builtins::kIterableToFixedArray, context, iterable,
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index 95d5229974..bd28cbc160 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -15,82 +15,72 @@ namespace internal {
void LazyBuiltinsAssembler::GenerateTailCallToJSCode(
TNode<Code> code, TNode<JSFunction> function) {
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
TailCallJSCode(code, context, function, new_target, argc);
}
void LazyBuiltinsAssembler::GenerateTailCallToReturnedCode(
Runtime::FunctionId function_id, TNode<JSFunction> function) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<Code> code = CAST(CallRuntime(function_id, context, function));
GenerateTailCallToJSCode(code, function);
}
void LazyBuiltinsAssembler::TailCallRuntimeIfMarkerEquals(
- TNode<Smi> marker, OptimizationMarker expected_marker,
+ TNode<Uint32T> marker, OptimizationMarker expected_marker,
Runtime::FunctionId function_id, TNode<JSFunction> function) {
Label no_match(this);
- GotoIfNot(SmiEqual(marker, SmiConstant(expected_marker)), &no_match);
+ GotoIfNot(Word32Equal(marker, Uint32Constant(expected_marker)), &no_match);
GenerateTailCallToReturnedCode(function_id, function);
BIND(&no_match);
}
void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
TNode<JSFunction> function, TNode<FeedbackVector> feedback_vector) {
- Label fallthrough(this);
-
- TNode<MaybeObject> maybe_optimized_code_entry = LoadMaybeWeakObjectField(
- feedback_vector, FeedbackVector::kOptimizedCodeWeakOrSmiOffset);
-
- // Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret it as a weak reference to a code
- // object.
- Label optimized_code_slot_is_smi(this), optimized_code_slot_is_weak_ref(this);
- Branch(TaggedIsSmi(maybe_optimized_code_entry), &optimized_code_slot_is_smi,
- &optimized_code_slot_is_weak_ref);
-
- BIND(&optimized_code_slot_is_smi);
- {
- // Optimized code slot is a Smi optimization marker.
- TNode<Smi> marker = CAST(maybe_optimized_code_entry);
-
- // Fall through if no optimization trigger.
- GotoIf(SmiEqual(marker, SmiConstant(OptimizationMarker::kNone)),
- &fallthrough);
-
- // TODO(ishell): introduce Runtime::kHandleOptimizationMarker and check
- // all these marker values there.
- TailCallRuntimeIfMarkerEquals(marker,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution, function);
- TailCallRuntimeIfMarkerEquals(marker, OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent,
- function);
- TailCallRuntimeIfMarkerEquals(
- marker, OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent, function);
-
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
- CSA_ASSERT(this,
- SmiEqual(marker,
- SmiConstant(OptimizationMarker::kInOptimizationQueue)));
- Goto(&fallthrough);
- }
-
- BIND(&optimized_code_slot_is_weak_ref);
+ Label fallthrough(this), may_have_optimized_code(this);
+
+ TNode<Uint32T> optimization_state =
+ LoadObjectField<Uint32T>(feedback_vector, FeedbackVector::kFlagsOffset);
+
+ // Fall through if no optimization trigger or optimized code.
+ GotoIfNot(IsSetWord32(
+ optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask),
+ &fallthrough);
+
+ GotoIfNot(IsSetWord32(
+ optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker),
+ &may_have_optimized_code);
+
+ // TODO(ishell): introduce Runtime::kHandleOptimizationMarker and check
+ // all these marker values there.
+ TNode<Uint32T> marker =
+ DecodeWord32<FeedbackVector::OptimizationMarkerBits>(optimization_state);
+ TailCallRuntimeIfMarkerEquals(marker, OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution, function);
+ TailCallRuntimeIfMarkerEquals(marker, OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent,
+ function);
+ TailCallRuntimeIfMarkerEquals(
+ marker, OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent, function);
+
+ Unreachable();
+ BIND(&may_have_optimized_code);
{
+ Label heal_optimized_code_slot(this);
+ TNode<MaybeObject> maybe_optimized_code_entry = LoadMaybeWeakObjectField(
+ feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset);
// Optimized code slot is a weak reference.
- TNode<Code> optimized_code =
- CAST(GetHeapObjectAssumeWeak(maybe_optimized_code_entry, &fallthrough));
+ TNode<Code> optimized_code = CAST(GetHeapObjectAssumeWeak(
+ maybe_optimized_code_entry, &heal_optimized_code_slot));
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code(this);
TNode<CodeDataContainer> code_data_container =
CAST(LoadObjectField(optimized_code, Code::kCodeDataContainerOffset));
@@ -98,17 +88,18 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
code_data_container, CodeDataContainer::kKindSpecificFlagsOffset);
GotoIf(IsSetWord32<Code::MarkedForDeoptimizationField>(
code_kind_specific_flags),
- &found_deoptimized_code);
+ &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
StoreObjectField(function, JSFunction::kCodeOffset, optimized_code);
GenerateTailCallToJSCode(optimized_code, function);
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- BIND(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(Runtime::kEvictOptimizedCodeSlot, function);
+ // Optimized code slot contains deoptimized code or code is cleared and
+ // optimized code marker isn't updated. Evict the code, update the marker
+ // and re-enter the closure's code.
+ BIND(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot, function);
}
// Fall-through if the optimized code cell is clear and there is no
@@ -156,13 +147,13 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
}
TF_BUILTIN(CompileLazy, LazyBuiltinsAssembler) {
- TNode<JSFunction> function = CAST(Parameter(Descriptor::kTarget));
+ auto function = Parameter<JSFunction>(Descriptor::kTarget);
CompileLazy(function);
}
TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) {
- TNode<JSFunction> function = CAST(Parameter(Descriptor::kTarget));
+ auto function = Parameter<JSFunction>(Descriptor::kTarget);
// Set the code slot inside the JSFunction to CompileLazy.
TNode<Code> code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
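
The lazy-compile change above replaces the old Smi optimization marker with a 32-bit optimization_state word loaded from the feedback vector: a combined mask first decides whether there is anything to do at all, then the marker bitfield is decoded and dispatched on, and only afterwards is the weak code slot inspected. A compact standalone sketch of that shape, with made-up bit layouts rather than V8's real FeedbackVector encoding:

// Sketch of dispatching on a marker bitfield packed into a 32-bit flags word.
// The field layout and marker values here are illustrative only.
#include <cstdint>
#include <cstdio>

enum class Marker : uint32_t { kNone = 0, kLogFirstExecution = 1,
                               kCompileOptimized = 2, kCompileConcurrent = 3 };

constexpr uint32_t kMarkerShift = 0;
constexpr uint32_t kMarkerMask  = 0x7u << kMarkerShift;   // 3-bit marker field
constexpr uint32_t kHasOptimizedCodeBit = 1u << 3;        // weak code ref present

void MaybeActOnOptimizationState(uint32_t optimization_state) {
  // Fall through if neither a marker nor cached optimized code is present.
  if ((optimization_state & (kMarkerMask | kHasOptimizedCodeBit)) == 0) return;

  Marker marker =
      static_cast<Marker>((optimization_state & kMarkerMask) >> kMarkerShift);
  switch (marker) {
    case Marker::kLogFirstExecution: std::puts("log first execution"); return;
    case Marker::kCompileOptimized:  std::puts("compile (not concurrent)"); return;
    case Marker::kCompileConcurrent: std::puts("compile (concurrent)"); return;
    case Marker::kNone:              break;  // no marker: check the code slot
  }
  if (optimization_state & kHasOptimizedCodeBit) std::puts("tail-call cached code");
}

int main() {
  MaybeActOnOptimizationState(0);                     // nothing to do
  MaybeActOnOptimizationState(2);                     // marker set: compile
  MaybeActOnOptimizationState(kHasOptimizedCodeBit);  // cached code present
}
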
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.h b/deps/v8/src/builtins/builtins-lazy-gen.h
index 6036da4661..b51dcb58d4 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.h
+++ b/deps/v8/src/builtins/builtins-lazy-gen.h
@@ -21,7 +21,7 @@ class LazyBuiltinsAssembler : public CodeStubAssembler {
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id,
TNode<JSFunction> function);
- void TailCallRuntimeIfMarkerEquals(TNode<Smi> marker,
+ void TailCallRuntimeIfMarkerEquals(TNode<Uint32T> marker,
OptimizationMarker expected_marker,
Runtime::FunctionId function_id,
TNode<JSFunction> function);
diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
index 1da6f54c82..9f16186d13 100644
--- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -53,8 +53,9 @@ class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue(
TNode<Context> native_context) {
CSA_ASSERT(this, IsNativeContext(native_context));
- return DecodeExternalPointer(LoadObjectField<ExternalPointerT>(
- native_context, NativeContext::kMicrotaskQueueOffset));
+ return LoadExternalPointerFromObject(native_context,
+ NativeContext::kMicrotaskQueueOffset,
+ kNativeContextMicrotaskQueueTag);
}
TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskRingBuffer(
@@ -489,8 +490,8 @@ void MicrotaskQueueBuiltinsAssembler::RunPromiseHook(
}
TF_BUILTIN(EnqueueMicrotask, MicrotaskQueueBuiltinsAssembler) {
- TNode<Microtask> microtask = CAST(Parameter(Descriptor::kMicrotask));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto microtask = Parameter<Microtask>(Descriptor::kMicrotask);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<RawPtrT> microtask_queue = GetMicrotaskQueue(native_context);
@@ -541,8 +542,8 @@ TF_BUILTIN(RunMicrotasks, MicrotaskQueueBuiltinsAssembler) {
// Load the current context from the isolate.
TNode<Context> current_context = GetCurrentContext();
- TNode<RawPtrT> microtask_queue =
- UncheckedCast<RawPtrT>(Parameter(Descriptor::kMicrotaskQueue));
+ auto microtask_queue =
+ UncheckedParameter<RawPtrT>(Descriptor::kMicrotaskQueue);
Label loop(this), done(this);
Goto(&loop);
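
In the microtask-queue hunk above, the raw DecodeExternalPointer call is replaced by LoadExternalPointerFromObject with an explicit kNativeContextMicrotaskQueueTag, so external pointers stored in heap objects are decoded together with a type tag. As a loose illustration of tag-checked external pointers (the encoding below is invented for the sketch and is not V8's sandbox format):

// Sketch of tag-checked external pointer encoding; the tag value and the
// XOR scheme are illustrative only.
#include <cstdint>
#include <cstdio>

using ExternalPointer = std::uintptr_t;
constexpr std::uintptr_t kMicrotaskQueueTag = 0x5Au;   // made-up tag value

ExternalPointer EncodeExternalPointer(void* ptr, std::uintptr_t tag) {
  return reinterpret_cast<std::uintptr_t>(ptr) ^ tag;
}

void* DecodeExternalPointer(ExternalPointer encoded, std::uintptr_t tag) {
  // Decoding with the wrong tag yields a scrambled, unusable pointer.
  return reinterpret_cast<void*>(encoded ^ tag);
}

int main() {
  int queue = 42;                         // stands in for a MicrotaskQueue
  ExternalPointer stored = EncodeExternalPointer(&queue, kMicrotaskQueueTag);
  int* q = static_cast<int*>(DecodeExternalPointer(stored, kMicrotaskQueueTag));
  std::printf("%d\n", *q);
}
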
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 4e8bcae60b..0e57959aad 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -16,13 +16,12 @@ namespace internal {
#define DEF_BINOP(Name, Generator) \
TF_BUILTIN(Name, CodeStubAssembler) { \
- TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft)); \
- TNode<Object> rhs = CAST(Parameter(Descriptor::kRight)); \
- TNode<Context> context = CAST(Parameter(Descriptor::kContext)); \
- TNode<HeapObject> maybe_feedback_vector = \
- CAST(Parameter(Descriptor::kMaybeFeedbackVector)); \
- TNode<UintPtrT> slot = \
- UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot)); \
+ auto lhs = Parameter<Object>(Descriptor::kLeft); \
+ auto rhs = Parameter<Object>(Descriptor::kRight); \
+ auto context = Parameter<Context>(Descriptor::kContext); \
+ auto maybe_feedback_vector = \
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector); \
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
\
BinaryOpAssembler binop_asm(state()); \
TNode<Object> result = binop_asm.Generator(context, lhs, rhs, slot, \
@@ -45,20 +44,19 @@ DEF_BINOP(ShiftRightLogical_WithFeedback,
Generate_ShiftRightLogicalWithFeedback)
#undef DEF_BINOP
-#define DEF_UNOP(Name, Generator) \
- TF_BUILTIN(Name, CodeStubAssembler) { \
- TNode<Object> value = CAST(Parameter(Descriptor::kValue)); \
- TNode<Context> context = CAST(Parameter(Descriptor::kContext)); \
- TNode<HeapObject> maybe_feedback_vector = \
- CAST(Parameter(Descriptor::kMaybeFeedbackVector)); \
- TNode<UintPtrT> slot = \
- UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot)); \
- \
- UnaryOpAssembler a(state()); \
- TNode<Object> result = \
- a.Generator(context, value, slot, maybe_feedback_vector); \
- \
- Return(result); \
+#define DEF_UNOP(Name, Generator) \
+ TF_BUILTIN(Name, CodeStubAssembler) { \
+ auto value = Parameter<Object>(Descriptor::kValue); \
+ auto context = Parameter<Context>(Descriptor::kContext); \
+ auto maybe_feedback_vector = \
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector); \
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
+ \
+ UnaryOpAssembler a(state()); \
+ TNode<Object> result = \
+ a.Generator(context, value, slot, maybe_feedback_vector); \
+ \
+ Return(result); \
}
DEF_UNOP(BitwiseNot_WithFeedback, Generate_BitwiseNotWithFeedback)
DEF_UNOP(Decrement_WithFeedback, Generate_DecrementWithFeedback)
@@ -68,13 +66,12 @@ DEF_UNOP(Negate_WithFeedback, Generate_NegateWithFeedback)
#define DEF_COMPARE(Name) \
TF_BUILTIN(Name##_WithFeedback, CodeStubAssembler) { \
- TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft)); \
- TNode<Object> rhs = CAST(Parameter(Descriptor::kRight)); \
- TNode<Context> context = CAST(Parameter(Descriptor::kContext)); \
- TNode<HeapObject> maybe_feedback_vector = \
- CAST(Parameter(Descriptor::kMaybeFeedbackVector)); \
- TNode<UintPtrT> slot = \
- UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot)); \
+ auto lhs = Parameter<Object>(Descriptor::kLeft); \
+ auto rhs = Parameter<Object>(Descriptor::kRight); \
+ auto context = Parameter<Context>(Descriptor::kContext); \
+ auto maybe_feedback_vector = \
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector); \
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
\
TVARIABLE(Smi, var_type_feedback); \
TNode<Oddball> result = RelationalComparison(Operation::k##Name, lhs, rhs, \
@@ -90,12 +87,12 @@ DEF_COMPARE(GreaterThanOrEqual)
#undef DEF_COMPARE
TF_BUILTIN(Equal_WithFeedback, CodeStubAssembler) {
- TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<UintPtrT> slot = UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot));
+ auto lhs = Parameter<Object>(Descriptor::kLeft);
+ auto rhs = Parameter<Object>(Descriptor::kRight);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
TVARIABLE(Smi, var_type_feedback);
TNode<Oddball> result = Equal(lhs, rhs, context, &var_type_feedback);
@@ -105,11 +102,11 @@ TF_BUILTIN(Equal_WithFeedback, CodeStubAssembler) {
}
TF_BUILTIN(StrictEqual_WithFeedback, CodeStubAssembler) {
- TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<UintPtrT> slot = UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot));
+ auto lhs = Parameter<Object>(Descriptor::kLeft);
+ auto rhs = Parameter<Object>(Descriptor::kRight);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
TVARIABLE(Smi, var_type_feedback);
TNode<Oddball> result = StrictEqual(lhs, rhs, &var_type_feedback);
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index bcc2f8ea64..7d133a6198 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -350,9 +350,9 @@ ObjectEntriesValuesBuiltinsAssembler::FinalizeValuesOrEntriesJSArray(
}
TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto object = Parameter<Object>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label call_runtime(this), return_true(this), return_false(this),
to_primitive(this);
@@ -421,10 +421,10 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
// ES #sec-object.assign
TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<Object> target = args.GetOptionalArgumentValue(0);
// 1. Let to be ? ToObject(target).
@@ -451,8 +451,8 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
// ES #sec-object.keys
TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kObject));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto object = Parameter<Object>(Descriptor::kObject);
+ auto context = Parameter<Context>(Descriptor::kContext);
TVARIABLE(Smi, var_length);
TVARIABLE(FixedArrayBase, var_elements);
@@ -541,8 +541,8 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
// ES #sec-object.getOwnPropertyNames
TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kObject));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto object = Parameter<Object>(Descriptor::kObject);
+ auto context = Parameter<Context>(Descriptor::kContext);
TVARIABLE(Smi, var_length);
TVARIABLE(FixedArrayBase, var_elements);
@@ -650,26 +650,22 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
}
TF_BUILTIN(ObjectValues, ObjectEntriesValuesBuiltinsAssembler) {
- TNode<JSObject> object =
- TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
- TNode<Context> context =
- TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+ auto object = UncheckedParameter<JSObject>(Descriptor::kObject);
+ auto context = UncheckedParameter<Context>(Descriptor::kContext);
GetOwnValuesOrEntries(context, object, CollectType::kValues);
}
TF_BUILTIN(ObjectEntries, ObjectEntriesValuesBuiltinsAssembler) {
- TNode<JSObject> object =
- TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
- TNode<Context> context =
- TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+ auto object = UncheckedParameter<JSObject>(Descriptor::kObject);
+ auto context = UncheckedParameter<Context>(Descriptor::kContext);
GetOwnValuesOrEntries(context, object, CollectType::kEntries);
}
// ES #sec-object.prototype.isprototypeof
TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label if_receiverisnullorundefined(this, Label::kDeferred),
if_valueisnotreceiver(this, Label::kDeferred);
@@ -723,8 +719,8 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
if_symbol(this, Label::kDeferred), if_value(this),
if_bigint(this, Label::kDeferred);
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto context = Parameter<Context>(Descriptor::kContext);
TVARIABLE(String, var_default);
TVARIABLE(HeapObject, var_holder);
@@ -1024,12 +1020,12 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
int const kPropertiesArg = 1;
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> prototype = args.GetOptionalArgumentValue(kPrototypeArg);
TNode<Object> properties = args.GetOptionalArgumentValue(kPropertiesArg);
- TNode<NativeContext> native_context = CAST(Parameter(Descriptor::kContext));
+ auto native_context = Parameter<NativeContext>(Descriptor::kContext);
Label call_runtime(this, Label::kDeferred), prototype_valid(this),
no_properties(this);
@@ -1111,8 +1107,8 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
// ES #sec-object.is
TF_BUILTIN(ObjectIs, ObjectBuiltinsAssembler) {
- const TNode<Object> left = CAST(Parameter(Descriptor::kLeft));
- const TNode<Object> right = CAST(Parameter(Descriptor::kRight));
+ const auto left = Parameter<Object>(Descriptor::kLeft);
+ const auto right = Parameter<Object>(Descriptor::kRight);
Label return_true(this), return_false(this);
BranchIfSameValue(left, right, &return_true, &return_false);
@@ -1125,9 +1121,9 @@ TF_BUILTIN(ObjectIs, ObjectBuiltinsAssembler) {
}
TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) {
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- const TNode<Oddball> done = CAST(Parameter(Descriptor::kDone));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto value = Parameter<Object>(Descriptor::kValue);
+ const auto done = Parameter<Oddball>(Descriptor::kDone);
+ const auto context = Parameter<Context>(Descriptor::kContext);
const TNode<NativeContext> native_context = LoadNativeContext(context);
const TNode<Map> map = CAST(
@@ -1142,28 +1138,28 @@ TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) {
}
TF_BUILTIN(HasProperty, ObjectBuiltinsAssembler) {
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Object> object = CAST(Parameter(Descriptor::kObject));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto object = Parameter<Object>(Descriptor::kObject);
+ auto context = Parameter<Context>(Descriptor::kContext);
Return(HasProperty(context, object, key, kHasProperty));
}
TF_BUILTIN(InstanceOf, ObjectBuiltinsAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> callable = CAST(Parameter(Descriptor::kRight));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto object = Parameter<Object>(Descriptor::kLeft);
+ auto callable = Parameter<Object>(Descriptor::kRight);
+ auto context = Parameter<Context>(Descriptor::kContext);
Return(InstanceOf(object, callable, context));
}
TF_BUILTIN(InstanceOf_WithFeedback, ObjectBuiltinsAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> callable = CAST(Parameter(Descriptor::kRight));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<UintPtrT> slot = UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot));
+ auto object = Parameter<Object>(Descriptor::kLeft);
+ auto callable = Parameter<Object>(Descriptor::kRight);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
CollectInstanceOfFeedback(callable, context, maybe_feedback_vector, slot);
Return(InstanceOf(object, callable, context));
@@ -1171,24 +1167,17 @@ TF_BUILTIN(InstanceOf_WithFeedback, ObjectBuiltinsAssembler) {
// ES6 section 7.3.19 OrdinaryHasInstance ( C, O )
TF_BUILTIN(OrdinaryHasInstance, ObjectBuiltinsAssembler) {
- TNode<Object> constructor = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> object = CAST(Parameter(Descriptor::kRight));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto constructor = Parameter<Object>(Descriptor::kLeft);
+ auto object = Parameter<Object>(Descriptor::kRight);
+ auto context = Parameter<Context>(Descriptor::kContext);
Return(OrdinaryHasInstance(context, constructor, object));
}
-TF_BUILTIN(GetSuperConstructor, ObjectBuiltinsAssembler) {
- TNode<JSFunction> object = CAST(Parameter(Descriptor::kObject));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- Return(GetSuperConstructor(context, object));
-}
-
TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
- TNode<JSFunction> closure = CAST(Parameter(Descriptor::kClosure));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto closure = Parameter<JSFunction>(Descriptor::kClosure);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto context = Parameter<Context>(Descriptor::kContext);
// Get the initial map from the function, jumping to the runtime if we don't
// have one.
@@ -1253,10 +1242,9 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
// ES6 section 19.1.2.7 Object.getOwnPropertyDescriptor ( O, P )
TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ CSA_ASSERT(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, argc);
TNode<Object> object_input = args.GetOptionalArgumentValue(0);
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index b482c6ba02..16f81dc3d0 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -38,7 +38,7 @@ BUILTIN(ObjectPrototypePropertyIsEnumerable) {
// ES6 section 19.1.2.3 Object.defineProperties
BUILTIN(ObjectDefineProperties) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_LE(3, args.length());
Handle<Object> target = args.at(1);
Handle<Object> properties = args.at(2);
@@ -49,7 +49,7 @@ BUILTIN(ObjectDefineProperties) {
// ES6 section 19.1.2.4 Object.defineProperty
BUILTIN(ObjectDefineProperty) {
HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
+ DCHECK_LE(4, args.length());
Handle<Object> target = args.at(1);
Handle<Object> key = args.at(2);
Handle<Object> attributes = args.at(3);
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 5b4b9d2536..74ac2b6681 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -83,11 +83,10 @@ TNode<JSFunction> ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(
}
TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
TNode<IntPtrT> argc_ptr = ChangeInt32ToIntPtr(argc);
- TNode<JSProxy> proxy = CAST(Parameter(Descriptor::kFunction));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto proxy = Parameter<JSProxy>(Descriptor::kFunction);
+ auto context = Parameter<Context>(Descriptor::kContext);
CSA_ASSERT(this, IsCallable(proxy));
@@ -139,12 +138,11 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
}
TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
TNode<IntPtrT> argc_ptr = ChangeInt32ToIntPtr(argc);
- TNode<JSProxy> proxy = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto proxy = Parameter<JSProxy>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto context = Parameter<Context>(Descriptor::kContext);
CSA_ASSERT(this, IsCallable(proxy));
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
index 8ad1e56143..cf835b3476 100644
--- a/deps/v8/src/builtins/builtins-reflect.cc
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -19,7 +19,7 @@ namespace internal {
// ES6 section 26.1.3 Reflect.defineProperty
BUILTIN(ReflectDefineProperty) {
HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
+ DCHECK_LE(4, args.length());
Handle<Object> target = args.at(1);
Handle<Object> key = args.at(2);
Handle<Object> attributes = args.at(3);
@@ -49,7 +49,7 @@ BUILTIN(ReflectDefineProperty) {
// ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor
BUILTIN(ReflectGetOwnPropertyDescriptor) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_LE(3, args.length());
Handle<Object> target = args.at(1);
Handle<Object> key = args.at(2);
@@ -75,7 +75,7 @@ BUILTIN(ReflectGetOwnPropertyDescriptor) {
// ES6 section 26.1.11 Reflect.ownKeys
BUILTIN(ReflectOwnKeys) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_LE(2, args.length());
Handle<Object> target = args.at(1);
if (!target->IsJSReceiver()) {
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 8be87180eb..fa0f45e831 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -372,7 +372,8 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
ToDirectStringAssembler to_direct(state(), string);
TVARIABLE(HeapObject, var_result);
- Label out(this), atom(this), runtime(this, Label::kDeferred);
+ Label out(this), atom(this), runtime(this, Label::kDeferred),
+ retry_experimental(this, Label::kDeferred);
// External constants.
TNode<ExternalReference> isolate_address =
@@ -595,6 +596,10 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
GotoIf(IntPtrEqual(int_result,
IntPtrConstant(RegExp::kInternalRegExpException)),
&if_exception);
+ GotoIf(IntPtrEqual(
+ int_result,
+ IntPtrConstant(RegExp::kInternalRegExpFallbackToExperimental)),
+ &retry_experimental);
CSA_ASSERT(this, IntPtrEqual(int_result,
IntPtrConstant(RegExp::kInternalRegExpRetry)));
@@ -672,6 +677,14 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
Unreachable();
}
+ BIND(&retry_experimental);
+ {
+ var_result =
+ CAST(CallRuntime(Runtime::kRegExpExperimentalOneshotExec, context,
+ regexp, string, last_index, match_info));
+ Goto(&out);
+ }
+
BIND(&runtime);
{
var_result = CAST(CallRuntime(Runtime::kRegExpExec, context, regexp, string,
@@ -813,11 +826,11 @@ void RegExpBuiltinsAssembler::BranchIfRegExpResult(const TNode<Context> context,
// and {match_info} is updated on success.
// The slow path is implemented in RegExp::AtomExec.
TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
- TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp));
- TNode<String> subject_string = CAST(Parameter(Descriptor::kString));
- TNode<Smi> last_index = CAST(Parameter(Descriptor::kLastIndex));
- TNode<FixedArray> match_info = CAST(Parameter(Descriptor::kMatchInfo));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto regexp = Parameter<JSRegExp>(Descriptor::kRegExp);
+ auto subject_string = Parameter<String>(Descriptor::kString);
+ auto last_index = Parameter<Smi>(Descriptor::kLastIndex);
+ auto match_info = Parameter<FixedArray>(Descriptor::kMatchInfo);
+ auto context = Parameter<Context>(Descriptor::kContext);
CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
@@ -874,11 +887,11 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
}
TF_BUILTIN(RegExpExecInternal, RegExpBuiltinsAssembler) {
- TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp));
- TNode<String> string = CAST(Parameter(Descriptor::kString));
- TNode<Number> last_index = CAST(Parameter(Descriptor::kLastIndex));
- TNode<RegExpMatchInfo> match_info = CAST(Parameter(Descriptor::kMatchInfo));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto regexp = Parameter<JSRegExp>(Descriptor::kRegExp);
+ auto string = Parameter<String>(Descriptor::kString);
+ auto last_index = Parameter<Number>(Descriptor::kLastIndex);
+ auto match_info = Parameter<RegExpMatchInfo>(Descriptor::kMatchInfo);
+ auto context = Parameter<Context>(Descriptor::kContext);
CSA_ASSERT(this, IsNumberNormalized(last_index));
CSA_ASSERT(this, IsNumberPositive(last_index));
@@ -916,6 +929,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
CASE_FOR_FLAG(JSRegExp::kGlobal);
CASE_FOR_FLAG(JSRegExp::kIgnoreCase);
+ CASE_FOR_FLAG(JSRegExp::kLinear);
CASE_FOR_FLAG(JSRegExp::kMultiline);
CASE_FOR_FLAG(JSRegExp::kDotAll);
CASE_FOR_FLAG(JSRegExp::kUnicode);
@@ -948,6 +962,32 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
CASE_FOR_FLAG("unicode", JSRegExp::kUnicode);
CASE_FOR_FLAG("sticky", JSRegExp::kSticky);
#undef CASE_FOR_FLAG
+
+ {
+ Label next(this);
+
+ // Check the runtime value of FLAG_enable_experimental_regexp_engine
+ // first.
+ TNode<Word32T> flag_value = UncheckedCast<Word32T>(
+ Load(MachineType::Uint8(),
+ ExternalConstant(
+ ExternalReference::
+ address_of_enable_experimental_regexp_engine())));
+ GotoIf(Word32Equal(Word32And(flag_value, Int32Constant(0xFF)),
+ Int32Constant(0)),
+ &next);
+
+ const TNode<Object> flag = GetProperty(
+ context, regexp, isolate->factory()->InternalizeUtf8String("linear"));
+ Label if_isflagset(this);
+ BranchIfToBooleanIsTrue(flag, &if_isflagset, &next);
+ BIND(&if_isflagset);
+ var_length = Uint32Add(var_length.value(), Uint32Constant(1));
+ var_flags =
+ Signed(WordOr(var_flags.value(), IntPtrConstant(JSRegExp::kLinear)));
+ Goto(&next);
+ BIND(&next);
+ }
}
// Allocate a string of the required length and fill it with the corresponding
@@ -973,6 +1013,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
CASE_FOR_FLAG(JSRegExp::kGlobal, 'g');
CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i');
+ CASE_FOR_FLAG(JSRegExp::kLinear, 'l');
CASE_FOR_FLAG(JSRegExp::kMultiline, 'm');
CASE_FOR_FLAG(JSRegExp::kDotAll, 's');
CASE_FOR_FLAG(JSRegExp::kUnicode, 'u');
@@ -1007,10 +1048,10 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpInitialize(
// ES#sec-regexp-pattern-flags
// RegExp ( pattern, flags )
TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
- TNode<Object> pattern = CAST(Parameter(Descriptor::kPattern));
- TNode<Object> flags = CAST(Parameter(Descriptor::kFlags));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto pattern = Parameter<Object>(Descriptor::kPattern);
+ auto flags = Parameter<Object>(Descriptor::kFlags);
+ auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
+ auto context = Parameter<Context>(Descriptor::kContext);
Isolate* isolate = this->isolate();
@@ -1128,10 +1169,10 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
// ES#sec-regexp.prototype.compile
// RegExp.prototype.compile ( pattern, flags )
TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
- TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> maybe_pattern = CAST(Parameter(Descriptor::kPattern));
- TNode<Object> maybe_flags = CAST(Parameter(Descriptor::kFlags));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto maybe_receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto maybe_pattern = Parameter<Object>(Descriptor::kPattern);
+ auto maybe_flags = Parameter<Object>(Descriptor::kFlags);
+ auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, maybe_receiver, JS_REG_EXP_TYPE,
"RegExp.prototype.compile");
@@ -1188,11 +1229,24 @@ TNode<BoolT> RegExpBuiltinsAssembler::FastFlagGetter(TNode<JSRegExp> regexp,
TNode<BoolT> RegExpBuiltinsAssembler::SlowFlagGetter(TNode<Context> context,
TNode<Object> regexp,
JSRegExp::Flag flag) {
- Label out(this);
+ Label out(this), if_true(this), if_false(this);
TVARIABLE(BoolT, var_result);
+ // Only enabled based on a runtime flag.
+ if (flag == JSRegExp::kLinear) {
+ TNode<Word32T> flag_value = UncheckedCast<Word32T>(Load(
+ MachineType::Uint8(),
+ ExternalConstant(ExternalReference::
+ address_of_enable_experimental_regexp_engine())));
+ GotoIf(Word32Equal(Word32And(flag_value, Int32Constant(0xFF)),
+ Int32Constant(0)),
+ &if_false);
+ }
+
Handle<String> name;
switch (flag) {
+ case JSRegExp::kNone:
+ UNREACHABLE();
case JSRegExp::kGlobal:
name = isolate()->factory()->global_string();
break;
@@ -1211,13 +1265,12 @@ TNode<BoolT> RegExpBuiltinsAssembler::SlowFlagGetter(TNode<Context> context,
case JSRegExp::kUnicode:
name = isolate()->factory()->unicode_string();
break;
- default:
- UNREACHABLE();
+ case JSRegExp::kLinear:
+ name = isolate()->factory()->linear_string();
+ break;
}
TNode<Object> value = GetProperty(context, regexp, name);
-
- Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
@@ -1243,7 +1296,6 @@ TNode<BoolT> RegExpBuiltinsAssembler::FlagGetter(TNode<Context> context,
TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex(
TNode<String> string, TNode<Number> index, TNode<BoolT> is_unicode,
bool is_fastpath) {
- CSA_ASSERT(this, IsString(string));
CSA_ASSERT(this, IsNumberNormalized(index));
if (is_fastpath) CSA_ASSERT(this, TaggedIsPositiveSmi(index));
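
The regexp hunks above thread a new experimental 'linear' flag (JSRegExp::kLinear, serialized as 'l') through both the fast and slow flag getters, but only when the runtime byte behind ExternalReference::address_of_enable_experimental_regexp_engine() is non-zero; otherwise the linear property is never read at all. Sketched as ordinary C++ with a plain global standing in for that external flag byte:

// Sketch of a flag getter gated on a runtime feature flag. The global below
// and the string-keyed property map are stand-ins, not V8's actual layout.
#include <cstdio>
#include <map>
#include <string>

bool enable_experimental_engine = false;   // checked at runtime, like the flag byte

bool GetRegExpFlag(const std::map<std::string, bool>& regexp_props,
                   const std::string& name) {
  if (name == "linear" && !enable_experimental_engine) {
    return false;                          // gated off: skip the property read
  }
  auto it = regexp_props.find(name);       // the "slow" property lookup
  return it != regexp_props.end() && it->second;
}

int main() {
  std::map<std::string, bool> re = {{"global", true}, {"linear", true}};
  std::printf("%d %d", GetRegExpFlag(re, "linear"), GetRegExpFlag(re, "global"));
  enable_experimental_engine = true;
  std::printf(" %d\n", GetRegExpFlag(re, "linear"));
}
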
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 26cf4fe159..1a004c4939 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -19,10 +19,10 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
- using AssemblerFunction = Node* (CodeAssembler::*)(MachineType type,
- Node* base, Node* offset,
- Node* value,
- Node* value_high);
+ using AssemblerFunction =
+ Node* (CodeAssembler::*)(MachineType type, TNode<RawPtrT> base,
+ TNode<UintPtrT> offset, Node* value,
+ base::Optional<TNode<UintPtrT>> value_high);
TNode<JSArrayBuffer> ValidateIntegerTypedArray(
TNode<Object> maybe_array, TNode<Context> context,
TNode<Int32T>* out_elements_kind, TNode<RawPtrT>* out_backing_store,
@@ -32,8 +32,8 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
TNode<Object> index,
TNode<Context> context);
- inline void DebugSanityCheckAtomicIndex(TNode<JSTypedArray> array,
- TNode<UintPtrT> index);
+ inline void DebugCheckAtomicIndex(TNode<JSTypedArray> array,
+ TNode<UintPtrT> index);
void AtomicBinopBuiltinCommon(TNode<Object> maybe_array, TNode<Object> index,
TNode<Object> value, TNode<Context> context,
@@ -127,7 +127,7 @@ TNode<UintPtrT> SharedArrayBufferBuiltinsAssembler::ValidateAtomicAccess(
return index_uintptr;
}
-void SharedArrayBufferBuiltinsAssembler::DebugSanityCheckAtomicIndex(
+void SharedArrayBufferBuiltinsAssembler::DebugCheckAtomicIndex(
TNode<JSTypedArray> array, TNode<UintPtrT> index) {
// In Debug mode, we re-validate the index as a sanity check because ToInteger
// above calls out to JavaScript. Atomics work on ArrayBuffers, which may be
@@ -165,9 +165,9 @@ TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromUnsigned64(
// https://tc39.es/ecma262/#sec-atomicload
TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
- TNode<Object> maybe_array = CAST(Parameter(Descriptor::kArray));
- TNode<Object> index = CAST(Parameter(Descriptor::kIndex));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto maybe_array = Parameter<Object>(Descriptor::kArray);
+ auto index = Parameter<Object>(Descriptor::kIndex);
+ auto context = Parameter<Context>(Descriptor::kContext);
// 1. Let buffer be ? ValidateIntegerTypedArray(typedArray).
Label detached(this);
@@ -258,10 +258,10 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
// https://tc39.es/ecma262/#sec-atomics.store
TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
- TNode<Object> maybe_array = CAST(Parameter(Descriptor::kArray));
- TNode<Object> index = CAST(Parameter(Descriptor::kIndex));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto maybe_array = Parameter<Object>(Descriptor::kArray);
+ auto index = Parameter<Object>(Descriptor::kIndex);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
// 1. Let buffer be ? ValidateIntegerTypedArray(typedArray).
Label detached(this);
@@ -295,7 +295,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, value_integer);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
// Steps 8-13.
//
@@ -336,7 +336,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
// 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
GotoIf(IsDetachedBuffer(array_buffer), &detached);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
TVARIABLE(UintPtrT, var_low);
TVARIABLE(UintPtrT, var_high);
@@ -360,10 +360,10 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
// https://tc39.es/ecma262/#sec-atomics.exchange
TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
- TNode<Object> maybe_array = CAST(Parameter(Descriptor::kArray));
- TNode<Object> index = CAST(Parameter(Descriptor::kIndex));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto maybe_array = Parameter<Object>(Descriptor::kArray);
+ auto index = Parameter<Object>(Descriptor::kIndex);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
// Inlines AtomicReadModifyWrite
// https://tc39.es/ecma262/#sec-atomicreadmodifywrite
@@ -405,7 +405,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
// buffer to become detached.
GotoIf(IsDetachedBuffer(array_buffer), &detached);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, value_integer);
@@ -424,29 +424,31 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
BIND(&i8);
Return(SmiFromInt32(AtomicExchange(MachineType::Int8(), backing_store,
- index_word, value_word32)));
+ index_word, value_word32, base::nullopt)));
BIND(&u8);
Return(SmiFromInt32(AtomicExchange(MachineType::Uint8(), backing_store,
- index_word, value_word32)));
+ index_word, value_word32, base::nullopt)));
BIND(&i16);
Return(SmiFromInt32(AtomicExchange(MachineType::Int16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ WordShl(index_word, UintPtrConstant(1)),
+ value_word32, base::nullopt)));
BIND(&u16);
Return(SmiFromInt32(AtomicExchange(MachineType::Uint16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ WordShl(index_word, UintPtrConstant(1)),
+ value_word32, base::nullopt)));
BIND(&i32);
- Return(ChangeInt32ToTagged(AtomicExchange(MachineType::Int32(), backing_store,
- WordShl(index_word, 2),
- value_word32)));
+ Return(ChangeInt32ToTagged(AtomicExchange(
+ MachineType::Int32(), backing_store,
+ WordShl(index_word, UintPtrConstant(2)), value_word32, base::nullopt)));
BIND(&u32);
- Return(ChangeUint32ToTagged(
- AtomicExchange(MachineType::Uint32(), backing_store,
- WordShl(index_word, 2), value_word32)));
+ Return(ChangeUint32ToTagged(AtomicExchange(
+ MachineType::Uint32(), backing_store,
+ WordShl(index_word, UintPtrConstant(2)), value_word32, base::nullopt)));
BIND(&big);
// 4. If typedArray.[[ContentType]] is BigInt, let v be ? ToBigInt(value).
@@ -455,7 +457,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
// 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
GotoIf(IsDetachedBuffer(array_buffer), &detached);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
TVARIABLE(UintPtrT, var_low);
TVARIABLE(UintPtrT, var_high);
@@ -469,14 +471,14 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
// This uses Uint64() intentionally: AtomicExchange is not implemented for
// Int64(), which is fine because the machine instruction only cares
// about words.
- Return(BigIntFromSigned64(AtomicExchange(MachineType::Uint64(), backing_store,
- WordShl(index_word, 3),
- var_low.value(), high)));
+ Return(BigIntFromSigned64(AtomicExchange(
+ MachineType::Uint64(), backing_store,
+ WordShl(index_word, UintPtrConstant(3)), var_low.value(), high)));
BIND(&u64);
- Return(BigIntFromUnsigned64(
- AtomicExchange(MachineType::Uint64(), backing_store,
- WordShl(index_word, 3), var_low.value(), high)));
+ Return(BigIntFromUnsigned64(AtomicExchange(
+ MachineType::Uint64(), backing_store,
+ WordShl(index_word, UintPtrConstant(3)), var_low.value(), high)));
// This shouldn't happen, we've already validated the type.
BIND(&other);
@@ -492,11 +494,11 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
// https://tc39.es/ecma262/#sec-atomics.compareexchange
TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
- TNode<Object> maybe_array = CAST(Parameter(Descriptor::kArray));
- TNode<Object> index = CAST(Parameter(Descriptor::kIndex));
- TNode<Object> old_value = CAST(Parameter(Descriptor::kOldValue));
- TNode<Object> new_value = CAST(Parameter(Descriptor::kNewValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto maybe_array = Parameter<Object>(Descriptor::kArray);
+ auto index = Parameter<Object>(Descriptor::kIndex);
+ auto old_value = Parameter<Object>(Descriptor::kOldValue);
+ auto new_value = Parameter<Object>(Descriptor::kNewValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
// 1. Let buffer be ? ValidateIntegerTypedArray(typedArray).
Label detached(this);
@@ -540,7 +542,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
// buffer to become detached.
GotoIf(IsDetachedBuffer(array_buffer), &detached);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
TNode<Word32T> old_value_word32 =
TruncateTaggedToWord32(context, old_value_integer);
@@ -600,7 +602,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
// 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
GotoIf(IsDetachedBuffer(array_buffer), &detached);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
TVARIABLE(UintPtrT, var_old_low);
TVARIABLE(UintPtrT, var_old_high);
@@ -640,15 +642,15 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
}
}
-#define BINOP_BUILTIN(op, method_name) \
- TF_BUILTIN(Atomics##op, SharedArrayBufferBuiltinsAssembler) { \
- TNode<Object> array = CAST(Parameter(Descriptor::kArray)); \
- TNode<Object> index = CAST(Parameter(Descriptor::kIndex)); \
- TNode<Object> value = CAST(Parameter(Descriptor::kValue)); \
- TNode<Context> context = CAST(Parameter(Descriptor::kContext)); \
- AtomicBinopBuiltinCommon(array, index, value, context, \
- &CodeAssembler::Atomic##op, \
- Runtime::kAtomics##op, method_name); \
+#define BINOP_BUILTIN(op, method_name) \
+ TF_BUILTIN(Atomics##op, SharedArrayBufferBuiltinsAssembler) { \
+ auto array = Parameter<Object>(Descriptor::kArray); \
+ auto index = Parameter<Object>(Descriptor::kIndex); \
+ auto value = Parameter<Object>(Descriptor::kValue); \
+ auto context = Parameter<Context>(Descriptor::kContext); \
+ AtomicBinopBuiltinCommon(array, index, value, context, \
+ &CodeAssembler::Atomic##op, \
+ Runtime::kAtomics##op, method_name); \
}
// https://tc39.es/ecma262/#sec-atomics.add
BINOP_BUILTIN(Add, "Atomics.add")
@@ -703,7 +705,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
// buffer to become detached.
GotoIf(IsDetachedBuffer(array_buffer), &detached);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, value_integer);
@@ -721,33 +723,29 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromInt32((this->*function)(MachineType::Int8(), backing_store,
- index_word, value_word32, nullptr)));
-
+ Return(
+ SmiFromInt32((this->*function)(MachineType::Int8(), backing_store,
+ index_word, value_word32, base::nullopt)));
BIND(&u8);
- Return(SmiFromInt32((this->*function)(MachineType::Uint8(), backing_store,
- index_word, value_word32, nullptr)));
-
+ Return(
+ SmiFromInt32((this->*function)(MachineType::Uint8(), backing_store,
+ index_word, value_word32, base::nullopt)));
BIND(&i16);
Return(SmiFromInt32((this->*function)(MachineType::Int16(), backing_store,
- WordShl(index_word, 1), value_word32,
- nullptr)));
-
+ WordShl(index_word, UintPtrConstant(1)),
+ value_word32, base::nullopt)));
BIND(&u16);
Return(SmiFromInt32((this->*function)(MachineType::Uint16(), backing_store,
- WordShl(index_word, 1), value_word32,
- nullptr)));
-
+ WordShl(index_word, UintPtrConstant(1)),
+ value_word32, base::nullopt)));
BIND(&i32);
- Return(ChangeInt32ToTagged(
- (this->*function)(MachineType::Int32(), backing_store,
- WordShl(index_word, 2), value_word32, nullptr)));
-
+ Return(ChangeInt32ToTagged((this->*function)(
+ MachineType::Int32(), backing_store,
+ WordShl(index_word, UintPtrConstant(2)), value_word32, base::nullopt)));
BIND(&u32);
- Return(ChangeUint32ToTagged(
- (this->*function)(MachineType::Uint32(), backing_store,
- WordShl(index_word, 2), value_word32, nullptr)));
-
+ Return(ChangeUint32ToTagged((this->*function)(
+ MachineType::Uint32(), backing_store,
+ WordShl(index_word, UintPtrConstant(2)), value_word32, base::nullopt)));
BIND(&big);
// 4. If typedArray.[[ContentType]] is BigInt, let v be ? ToBigInt(value).
TNode<BigInt> value_bigint = ToBigInt(context, value);
@@ -755,7 +753,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
// 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
GotoIf(IsDetachedBuffer(array_buffer), &detached);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
TVARIABLE(UintPtrT, var_low);
TVARIABLE(UintPtrT, var_high);
@@ -769,15 +767,13 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
// This uses Uint64() intentionally: Atomic* ops are not implemented for
// Int64(), which is fine because the machine instructions only care
// about words.
- Return(BigIntFromSigned64(
- (this->*function)(MachineType::Uint64(), backing_store,
- WordShl(index_word, 3), var_low.value(), high)));
-
+ Return(BigIntFromSigned64((this->*function)(
+ MachineType::Uint64(), backing_store,
+ WordShl(index_word, UintPtrConstant(3)), var_low.value(), high)));
BIND(&u64);
- Return(BigIntFromUnsigned64(
- (this->*function)(MachineType::Uint64(), backing_store,
- WordShl(index_word, 3), var_low.value(), high)));
-
+ Return(BigIntFromUnsigned64((this->*function)(
+ MachineType::Uint64(), backing_store,
+ WordShl(index_word, UintPtrConstant(3)), var_low.value(), high)));
// This shouldn't happen, we've already validated the type.
BIND(&other);
Unreachable();
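
Context for the Atomics hunks above: these builtins ultimately perform a sequentially consistent read-modify-write on the shared backing store, with the element index scaled to a byte offset (the WordShl by 1, 2, or 3). A minimal standalone C++ sketch of that operation follows; the helper name is illustrative only, not the CSA code itself.

#include <atomic>
#include <cstddef>
#include <cstdint>

// Sequentially consistent exchange over one element of a shared integer
// buffer, mirroring https://tc39.es/ecma262/#sec-atomicreadmodifywrite.
// In the builtin, the byte offset is index << log2(sizeof(T)); here the
// scaling is implicit in the typed pointer arithmetic.
template <typename T>
T AtomicExchangeAt(std::atomic<T>* elements, std::size_t index, T value) {
  return elements[index].exchange(value, std::memory_order_seq_cst);
}

int main() {
  std::atomic<int32_t> cell{7};
  int32_t old = AtomicExchangeAt<int32_t>(&cell, 0, 42);  // old == 7, cell == 42
  return old == 7 ? 0 : 1;
}
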
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 9920369136..aa982a3c1b 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -46,8 +46,7 @@ TNode<RawPtrT> StringBuiltinsAssembler::DirectStringData(
Word32And(string_instance_type,
Int32Constant(kUncachedExternalStringMask)),
Int32Constant(kUncachedExternalStringTag)));
- var_data =
- DecodeExternalPointer(LoadExternalStringResourceData(CAST(string)));
+ var_data = LoadExternalStringResourceDataPtr(CAST(string));
Goto(&if_join);
}
@@ -344,9 +343,11 @@ TNode<String> StringBuiltinsAssembler::AllocateConsString(TNode<Uint32T> length,
return CAST(result);
}
-TNode<String> StringBuiltinsAssembler::StringAdd(SloppyTNode<Context> context,
- TNode<String> left,
- TNode<String> right) {
+TNode<String> StringBuiltinsAssembler::StringAdd(
+ TNode<ContextOrEmptyContext> context, TNode<String> left,
+ TNode<String> right) {
+ CSA_ASSERT(this, IsZeroOrContext(context));
+
TVARIABLE(String, result);
Label check_right(this), runtime(this, Label::kDeferred), cons(this),
done(this, &result), done_native(this, &result);
@@ -540,16 +541,18 @@ TNode<String> StringBuiltinsAssembler::DerefIndirectString(
}
TF_BUILTIN(StringAdd_CheckNone, StringBuiltinsAssembler) {
- TNode<String> left = CAST(Parameter(Descriptor::kLeft));
- TNode<String> right = CAST(Parameter(Descriptor::kRight));
- Node* context = Parameter(Descriptor::kContext);
+ auto left = Parameter<String>(Descriptor::kLeft);
+ auto right = Parameter<String>(Descriptor::kRight);
+ TNode<ContextOrEmptyContext> context =
+ UncheckedParameter<ContextOrEmptyContext>(Descriptor::kContext);
+ CSA_ASSERT(this, IsZeroOrContext(context));
Return(StringAdd(context, left, right));
}
TF_BUILTIN(SubString, StringBuiltinsAssembler) {
- TNode<String> string = CAST(Parameter(Descriptor::kString));
- TNode<Smi> from = CAST(Parameter(Descriptor::kFrom));
- TNode<Smi> to = CAST(Parameter(Descriptor::kTo));
+ auto string = Parameter<String>(Descriptor::kString);
+ auto from = Parameter<Smi>(Descriptor::kFrom);
+ auto to = Parameter<Smi>(Descriptor::kTo);
Return(SubString(string, SmiUntag(from), SmiUntag(to)));
}
@@ -723,40 +726,39 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(
}
TF_BUILTIN(StringEqual, StringBuiltinsAssembler) {
- TNode<String> left = CAST(Parameter(Descriptor::kLeft));
- TNode<String> right = CAST(Parameter(Descriptor::kRight));
+ auto left = Parameter<String>(Descriptor::kLeft);
+ auto right = Parameter<String>(Descriptor::kRight);
GenerateStringEqual(left, right);
}
TF_BUILTIN(StringLessThan, StringBuiltinsAssembler) {
- TNode<String> left = CAST(Parameter(Descriptor::kLeft));
- TNode<String> right = CAST(Parameter(Descriptor::kRight));
+ auto left = Parameter<String>(Descriptor::kLeft);
+ auto right = Parameter<String>(Descriptor::kRight);
GenerateStringRelationalComparison(left, right, Operation::kLessThan);
}
TF_BUILTIN(StringLessThanOrEqual, StringBuiltinsAssembler) {
- TNode<String> left = CAST(Parameter(Descriptor::kLeft));
- TNode<String> right = CAST(Parameter(Descriptor::kRight));
+ auto left = Parameter<String>(Descriptor::kLeft);
+ auto right = Parameter<String>(Descriptor::kRight);
GenerateStringRelationalComparison(left, right, Operation::kLessThanOrEqual);
}
TF_BUILTIN(StringGreaterThan, StringBuiltinsAssembler) {
- TNode<String> left = CAST(Parameter(Descriptor::kLeft));
- TNode<String> right = CAST(Parameter(Descriptor::kRight));
+ auto left = Parameter<String>(Descriptor::kLeft);
+ auto right = Parameter<String>(Descriptor::kRight);
GenerateStringRelationalComparison(left, right, Operation::kGreaterThan);
}
TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) {
- TNode<String> left = CAST(Parameter(Descriptor::kLeft));
- TNode<String> right = CAST(Parameter(Descriptor::kRight));
+ auto left = Parameter<String>(Descriptor::kLeft);
+ auto right = Parameter<String>(Descriptor::kRight);
GenerateStringRelationalComparison(left, right,
Operation::kGreaterThanOrEqual);
}
TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
- TNode<String> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<IntPtrT> position =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kPosition));
+ auto receiver = Parameter<String>(Descriptor::kReceiver);
+ auto position = UncheckedParameter<IntPtrT>(Descriptor::kPosition);
// TODO(sigurds) Figure out if passing length as argument pays off.
TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
@@ -770,9 +772,8 @@ TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
}
TF_BUILTIN(StringFromCodePointAt, StringBuiltinsAssembler) {
- TNode<String> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<IntPtrT> position =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kPosition));
+ auto receiver = Parameter<String>(Descriptor::kReceiver);
+ auto position = UncheckedParameter<IntPtrT>(Descriptor::kPosition);
// TODO(sigurds) Figure out if passing length as argument pays off.
TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
@@ -791,9 +792,8 @@ TF_BUILTIN(StringFromCodePointAt, StringBuiltinsAssembler) {
TF_BUILTIN(StringFromCharCode, StringBuiltinsAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
CodeStubArguments arguments(this, argc);
// Check if we have exactly one argument (plus the implicit receiver), i.e.
@@ -1064,9 +1064,9 @@ void StringBuiltinsAssembler::StringIndexOf(
// #sec-string.prototype.indexof
// Unchecked helper for builtins lowering.
TF_BUILTIN(StringIndexOf, StringBuiltinsAssembler) {
- TNode<String> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<String> search_string = CAST(Parameter(Descriptor::kSearchString));
- TNode<Smi> position = CAST(Parameter(Descriptor::kPosition));
+ auto receiver = Parameter<String>(Descriptor::kReceiver);
+ auto search_string = Parameter<String>(Descriptor::kSearchString);
+ auto position = Parameter<Smi>(Descriptor::kPosition);
StringIndexOf(receiver, search_string, position,
[this](TNode<Smi> result) { this->Return(result); });
}
@@ -1075,8 +1075,8 @@ TF_BUILTIN(StringIndexOf, StringBuiltinsAssembler) {
// #sec-string.prototype.includes
TF_BUILTIN(StringPrototypeIncludes, StringIncludesIndexOfAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
Generate(kIncludes, argc, context);
}
@@ -1084,8 +1084,8 @@ TF_BUILTIN(StringPrototypeIncludes, StringIncludesIndexOfAssembler) {
// #sec-string.prototype.indexof
TF_BUILTIN(StringPrototypeIndexOf, StringIncludesIndexOfAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
Generate(kIndexOf, argc, context);
}
@@ -1276,10 +1276,10 @@ TNode<String> StringBuiltinsAssembler::GetSubstitution(
TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
Label out(this);
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> search = CAST(Parameter(Descriptor::kSearch));
- const TNode<Object> replace = CAST(Parameter(Descriptor::kReplace));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto search = Parameter<Object>(Descriptor::kSearch);
+ const auto replace = Parameter<Object>(Descriptor::kReplace);
+ auto context = Parameter<Context>(Descriptor::kContext);
const TNode<Smi> smi_zero = SmiConstant(0);
@@ -1503,9 +1503,9 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
// ES6 #sec-string.prototype.match
TF_BUILTIN(StringPrototypeMatch, StringMatchSearchAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> maybe_regexp = CAST(Parameter(Descriptor::kRegexp));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto maybe_regexp = Parameter<Object>(Descriptor::kRegexp);
+ auto context = Parameter<Context>(Descriptor::kContext);
Generate(kMatch, "String.prototype.match", receiver, maybe_regexp, context);
}
@@ -1514,9 +1514,9 @@ TF_BUILTIN(StringPrototypeMatch, StringMatchSearchAssembler) {
TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
char const* method_name = "String.prototype.matchAll";
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> maybe_regexp = CAST(Parameter(Descriptor::kRegexp));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_regexp = Parameter<Object>(Descriptor::kRegexp);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
TNode<NativeContext> native_context = LoadNativeContext(context);
// 1. Let O be ? RequireObjectCoercible(this value).
@@ -1611,9 +1611,9 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
// ES6 #sec-string.prototype.search
TF_BUILTIN(StringPrototypeSearch, StringMatchSearchAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> maybe_regexp = CAST(Parameter(Descriptor::kRegexp));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto maybe_regexp = Parameter<Object>(Descriptor::kRegexp);
+ auto context = Parameter<Context>(Descriptor::kContext);
Generate(kSearch, "String.prototype.search", receiver, maybe_regexp, context);
}
@@ -1703,13 +1703,13 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
const int kLimitArg = 1;
const TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
const TNode<Object> separator = args.GetOptionalArgumentValue(kSeparatorArg);
const TNode<Object> limit = args.GetOptionalArgumentValue(kLimitArg);
- TNode<NativeContext> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<NativeContext>(Descriptor::kContext);
TNode<Smi> smi_zero = SmiConstant(0);
@@ -1800,185 +1800,13 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
}
TF_BUILTIN(StringSubstring, StringBuiltinsAssembler) {
- TNode<String> string = CAST(Parameter(Descriptor::kString));
- TNode<IntPtrT> from = UncheckedCast<IntPtrT>(Parameter(Descriptor::kFrom));
- TNode<IntPtrT> to = UncheckedCast<IntPtrT>(Parameter(Descriptor::kTo));
+ auto string = Parameter<String>(Descriptor::kString);
+ auto from = UncheckedParameter<IntPtrT>(Descriptor::kFrom);
+ auto to = UncheckedParameter<IntPtrT>(Descriptor::kTo);
Return(SubString(string, from, to));
}
-// ES6 #sec-string.prototype.trim
-TF_BUILTIN(StringPrototypeTrim, StringTrimAssembler) {
- TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- Generate(String::kTrim, "String.prototype.trim", argc, context);
-}
-
-// https://github.com/tc39/proposal-string-left-right-trim
-TF_BUILTIN(StringPrototypeTrimStart, StringTrimAssembler) {
- TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- Generate(String::kTrimStart, "String.prototype.trimLeft", argc, context);
-}
-
-// https://github.com/tc39/proposal-string-left-right-trim
-TF_BUILTIN(StringPrototypeTrimEnd, StringTrimAssembler) {
- TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- Generate(String::kTrimEnd, "String.prototype.trimRight", argc, context);
-}
-
-void StringTrimAssembler::Generate(String::TrimMode mode,
- const char* method_name, TNode<IntPtrT> argc,
- TNode<Context> context) {
- Label return_emptystring(this), if_runtime(this);
-
- CodeStubArguments arguments(this, argc);
- TNode<Object> receiver = arguments.GetReceiver();
-
- // Check that {receiver} is coercible to Object and convert it to a String.
- const TNode<String> string = ToThisString(context, receiver, method_name);
- const TNode<IntPtrT> string_length = LoadStringLengthAsWord(string);
-
- ToDirectStringAssembler to_direct(state(), string);
- to_direct.TryToDirect(&if_runtime);
- const TNode<RawPtrT> string_data = to_direct.PointerToData(&if_runtime);
- const TNode<Int32T> instance_type = to_direct.instance_type();
- const TNode<BoolT> is_stringonebyte =
- IsOneByteStringInstanceType(instance_type);
- const TNode<IntPtrT> string_data_offset = to_direct.offset();
-
- TVARIABLE(IntPtrT, var_start, IntPtrConstant(0));
- TVARIABLE(IntPtrT, var_end, IntPtrSub(string_length, IntPtrConstant(1)));
-
- if (mode == String::kTrimStart || mode == String::kTrim) {
- ScanForNonWhiteSpaceOrLineTerminator(string_data, string_data_offset,
- is_stringonebyte, &var_start,
- string_length, 1, &return_emptystring);
- }
- if (mode == String::kTrimEnd || mode == String::kTrim) {
- ScanForNonWhiteSpaceOrLineTerminator(
- string_data, string_data_offset, is_stringonebyte, &var_end,
- IntPtrConstant(-1), -1, &return_emptystring);
- }
-
- arguments.PopAndReturn(
- SubString(string, var_start.value(),
- IntPtrAdd(var_end.value(), IntPtrConstant(1))));
-
- BIND(&if_runtime);
- arguments.PopAndReturn(
- CallRuntime(Runtime::kStringTrim, context, string, SmiConstant(mode)));
-
- BIND(&return_emptystring);
- arguments.PopAndReturn(EmptyStringConstant());
-}
-
-void StringTrimAssembler::ScanForNonWhiteSpaceOrLineTerminator(
- const TNode<RawPtrT> string_data, const TNode<IntPtrT> string_data_offset,
- const TNode<BoolT> is_stringonebyte, TVariable<IntPtrT>* const var_index,
- const TNode<IntPtrT> end, int increment, Label* const if_none_found) {
- Label if_stringisonebyte(this), out(this);
-
- GotoIf(is_stringonebyte, &if_stringisonebyte);
-
- // Two Byte String
- BuildLoop<Uint16T>(
- var_index, end, increment, if_none_found, &out,
- [&](const TNode<IntPtrT> index) {
- return Load<Uint16T>(
- string_data,
- WordShl(IntPtrAdd(index, string_data_offset), IntPtrConstant(1)));
- });
-
- BIND(&if_stringisonebyte);
- BuildLoop<Uint8T>(var_index, end, increment, if_none_found, &out,
- [&](const TNode<IntPtrT> index) {
- return Load<Uint8T>(string_data,
- IntPtrAdd(index, string_data_offset));
- });
-
- BIND(&out);
-}
-
-template <typename T>
-void StringTrimAssembler::BuildLoop(
- TVariable<IntPtrT>* const var_index, const TNode<IntPtrT> end,
- int increment, Label* const if_none_found, Label* const out,
- const std::function<TNode<T>(const TNode<IntPtrT>)>& get_character) {
- Label loop(this, var_index);
- Goto(&loop);
- BIND(&loop);
- {
- TNode<IntPtrT> index = var_index->value();
- GotoIf(IntPtrEqual(index, end), if_none_found);
- GotoIfNotWhiteSpaceOrLineTerminator(
- UncheckedCast<Uint32T>(get_character(index)), out);
- Increment(var_index, increment);
- Goto(&loop);
- }
-}
-
-void StringTrimAssembler::GotoIfNotWhiteSpaceOrLineTerminator(
- const TNode<Word32T> char_code, Label* const if_not_whitespace) {
- Label out(this);
-
- // 0x0020 - SPACE (Intentionally out of order to fast path a common case)
- GotoIf(Word32Equal(char_code, Int32Constant(0x0020)), &out);
-
- // 0x0009 - HORIZONTAL TAB
- GotoIf(Uint32LessThan(char_code, Int32Constant(0x0009)), if_not_whitespace);
- // 0x000A - LINE FEED OR NEW LINE
- // 0x000B - VERTICAL TAB
- // 0x000C - FORMFEED
- // 0x000D - CARRIAGE RETURN
- GotoIf(Uint32LessThanOrEqual(char_code, Int32Constant(0x000D)), &out);
-
- // Common Non-whitespace characters
- GotoIf(Uint32LessThan(char_code, Int32Constant(0x00A0)), if_not_whitespace);
-
- // 0x00A0 - NO-BREAK SPACE
- GotoIf(Word32Equal(char_code, Int32Constant(0x00A0)), &out);
-
- // 0x1680 - Ogham Space Mark
- GotoIf(Word32Equal(char_code, Int32Constant(0x1680)), &out);
-
- // 0x2000 - EN QUAD
- GotoIf(Uint32LessThan(char_code, Int32Constant(0x2000)), if_not_whitespace);
- // 0x2001 - EM QUAD
- // 0x2002 - EN SPACE
- // 0x2003 - EM SPACE
- // 0x2004 - THREE-PER-EM SPACE
- // 0x2005 - FOUR-PER-EM SPACE
- // 0x2006 - SIX-PER-EM SPACE
- // 0x2007 - FIGURE SPACE
- // 0x2008 - PUNCTUATION SPACE
- // 0x2009 - THIN SPACE
- // 0x200A - HAIR SPACE
- GotoIf(Uint32LessThanOrEqual(char_code, Int32Constant(0x200A)), &out);
-
- // 0x2028 - LINE SEPARATOR
- GotoIf(Word32Equal(char_code, Int32Constant(0x2028)), &out);
- // 0x2029 - PARAGRAPH SEPARATOR
- GotoIf(Word32Equal(char_code, Int32Constant(0x2029)), &out);
- // 0x202F - NARROW NO-BREAK SPACE
- GotoIf(Word32Equal(char_code, Int32Constant(0x202F)), &out);
- // 0x205F - MEDIUM MATHEMATICAL SPACE
- GotoIf(Word32Equal(char_code, Int32Constant(0x205F)), &out);
- // 0xFEFF - BYTE ORDER MARK
- GotoIf(Word32Equal(char_code, Int32Constant(0xFEFF)), &out);
- // 0x3000 - IDEOGRAPHIC SPACE
- Branch(Word32Equal(char_code, Int32Constant(0x3000)), &out,
- if_not_whitespace);
-
- BIND(&out);
-}
// Return the |word32| codepoint at {index}. Supports SeqStrings and
// ExternalStrings.
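
The deleted CSA trim helper above scanned for a fixed set of WhiteSpace and LineTerminator code points. For reference, here is an equivalent standalone C++ predicate over the same list; this is an illustration only, not the replacement implementation.

#include <cstdint>

// True for the code points treated as trimmable by String.prototype.trim.
bool IsWhiteSpaceOrLineTerminator(uint32_t c) {
  switch (c) {
    case 0x0009:  // HORIZONTAL TAB
    case 0x000A:  // LINE FEED
    case 0x000B:  // VERTICAL TAB
    case 0x000C:  // FORM FEED
    case 0x000D:  // CARRIAGE RETURN
    case 0x0020:  // SPACE
    case 0x00A0:  // NO-BREAK SPACE
    case 0x1680:  // OGHAM SPACE MARK
    case 0x2028:  // LINE SEPARATOR
    case 0x2029:  // PARAGRAPH SEPARATOR
    case 0x202F:  // NARROW NO-BREAK SPACE
    case 0x205F:  // MEDIUM MATHEMATICAL SPACE
    case 0x3000:  // IDEOGRAPHIC SPACE
    case 0xFEFF:  // BYTE ORDER MARK
      return true;
    default:
      // EN QUAD (0x2000) through HAIR SPACE (0x200A).
      return c >= 0x2000 && c <= 0x200A;
  }
}
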
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 2b4dadbbb0..5e3ee93f17 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -113,8 +113,8 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
TNode<String> AllocateConsString(TNode<Uint32T> length, TNode<String> left,
TNode<String> right);
- TNode<String> StringAdd(SloppyTNode<Context> context, TNode<String> left,
- TNode<String> right);
+ TNode<String> StringAdd(TNode<ContextOrEmptyContext> context,
+ TNode<String> left, TNode<String> right);
// Check if |string| is an indirect (thin or flat cons) string type that can
// be dereferenced by DerefIndirectString.
@@ -184,30 +184,6 @@ class StringIncludesIndexOfAssembler : public StringBuiltinsAssembler {
TNode<Context> context);
};
-class StringTrimAssembler : public StringBuiltinsAssembler {
- public:
- explicit StringTrimAssembler(compiler::CodeAssemblerState* state)
- : StringBuiltinsAssembler(state) {}
-
- V8_EXPORT_PRIVATE void GotoIfNotWhiteSpaceOrLineTerminator(
- const TNode<Word32T> char_code, Label* const if_not_whitespace);
-
- protected:
- void Generate(String::TrimMode mode, const char* method, TNode<IntPtrT> argc,
- TNode<Context> context);
-
- void ScanForNonWhiteSpaceOrLineTerminator(
- const TNode<RawPtrT> string_data, const TNode<IntPtrT> string_data_offset,
- const TNode<BoolT> is_stringonebyte, TVariable<IntPtrT>* const var_index,
- const TNode<IntPtrT> end, int increment, Label* const if_none_found);
-
- template <typename T>
- void BuildLoop(
- TVariable<IntPtrT>* const var_index, const TNode<IntPtrT> end,
- int increment, Label* const if_none_found, Label* const out,
- const std::function<TNode<T>(const TNode<IntPtrT>)>& get_character);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index df5ba93a59..8994211756 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -150,7 +150,7 @@ BUILTIN(StringPrototypeLocaleCompare) {
isolate, str1, str2, args.atOrUndefined(isolate, 2),
args.atOrUndefined(isolate, 3), method));
#else
- DCHECK_EQ(2, args.length());
+ DCHECK_LE(2, args.length());
TO_THIS_STRING(str1, method);
Handle<String> str2;
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 26c67cfc12..3bf0c6e73e 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -25,6 +25,7 @@ void TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
TNode<JSTypedArray> holder) {
for (int offset = JSTypedArray::kHeaderSize;
offset < JSTypedArray::kSizeWithEmbedderFields; offset += kTaggedSize) {
+ // TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
StoreObjectField(holder, offset, SmiConstant(0));
}
}
@@ -65,31 +66,32 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset,
byte_length);
- StoreJSArrayBufferBackingStore(
- buffer,
- EncodeExternalPointer(ReinterpretCast<RawPtrT>(IntPtrConstant(0))));
- StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kExtensionOffset,
- IntPtrConstant(0));
+ InitializeExternalPointerField(buffer, JSArrayBuffer::kBackingStoreOffset,
+ PointerConstant(nullptr),
+ kArrayBufferBackingStoreTag);
+ StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kExtensionOffset,
+ IntPtrConstant(0));
for (int offset = JSArrayBuffer::kHeaderSize;
offset < JSArrayBuffer::kSizeWithEmbedderFields; offset += kTaggedSize) {
+ // TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(0));
}
return buffer;
}
TF_BUILTIN(TypedArrayBaseConstructor, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
ThrowTypeError(context, MessageTemplate::kConstructAbstractClass,
"TypedArray");
}
// ES #sec-typedarray-constructors
TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSFunction> target = CAST(Parameter(Descriptor::kJSTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto target = Parameter<JSFunction>(Descriptor::kJSTarget);
+ auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> arg1 = args.GetOptionalArgumentValue(0);
TNode<Object> arg2 = args.GetOptionalArgumentValue(1);
@@ -116,8 +118,8 @@ TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
// ES6 #sec-get-%typedarray%.prototype.bytelength
TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) {
const char* const kMethodName = "get TypedArray.prototype.byteLength";
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
@@ -134,8 +136,8 @@ TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) {
// ES6 #sec-get-%typedarray%.prototype.byteoffset
TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
const char* const kMethodName = "get TypedArray.prototype.byteOffset";
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
@@ -152,8 +154,8 @@ TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
// ES6 #sec-get-%typedarray%.prototype.length
TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
const char* const kMethodName = "get TypedArray.prototype.length";
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
@@ -355,6 +357,12 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
BIND(&next);
}
+void TypedArrayBuiltinsAssembler::AllocateJSTypedArrayExternalPointerEntry(
+ TNode<JSTypedArray> holder) {
+ InitializeExternalPointerField(
+ holder, IntPtrConstant(JSTypedArray::kExternalPointerOffset));
+}
+
void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
TNode<JSTypedArray> holder, TNode<ByteArray> base, TNode<UintPtrT> offset) {
offset = UintPtrAdd(UintPtrConstant(ByteArray::kHeaderSize - kHeapObjectTag),
@@ -373,9 +381,8 @@ void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
offset = Unsigned(IntPtrAdd(offset, isolate_root));
}
- StoreObjectField(holder, JSTypedArray::kBasePointerOffset, base);
- StoreJSTypedArrayExternalPointer(
- holder, EncodeExternalPointer(ReinterpretCast<RawPtrT>(offset)));
+ StoreJSTypedArrayBasePointer(holder, base);
+ StoreJSTypedArrayExternalPointerPtr(holder, ReinterpretCast<RawPtrT>(offset));
}
void TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
@@ -384,7 +391,7 @@ void TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
SmiConstant(0));
base = RawPtrAdd(base, Signed(offset));
- StoreJSTypedArrayExternalPointer(holder, EncodeExternalPointer(base));
+ StoreJSTypedArrayExternalPointerPtr(holder, base);
}
void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromNumeric(
@@ -441,7 +448,7 @@ void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged(
// ES #sec-get-%typedarray%.prototype-@@tostringtag
TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Label if_receiverisheapobject(this), return_undefined(this);
Branch(TaggedIsSmi(receiver), &return_undefined, &if_receiverisheapobject);
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index 780c36123e..fcaa2f2a65 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -77,6 +77,7 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
void DispatchTypedArrayByElementsKind(
TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
+ void AllocateJSTypedArrayExternalPointerEntry(TNode<JSTypedArray> holder);
void SetJSTypedArrayOnHeapDataPtr(TNode<JSTypedArray> holder,
TNode<ByteArray> base,
TNode<UintPtrT> offset);
diff --git a/deps/v8/src/builtins/builtins-utils-gen.h b/deps/v8/src/builtins/builtins-utils-gen.h
index f9e2ba74fa..3a812b62b8 100644
--- a/deps/v8/src/builtins/builtins-utils-gen.h
+++ b/deps/v8/src/builtins/builtins-utils-gen.h
@@ -5,6 +5,7 @@
#ifndef V8_BUILTINS_BUILTINS_UTILS_GEN_H_
#define V8_BUILTINS_BUILTINS_UTILS_GEN_H_
+#include "include/cppgc/source-location.h"
#include "src/builtins/builtins-descriptors.h"
namespace v8 {
@@ -26,27 +27,35 @@ class CodeAssemblerState;
//
// In the body of the builtin function the arguments can be accessed
// as "Parameter(n)".
-#define TF_BUILTIN(Name, AssemblerBase) \
- class Name##Assembler : public AssemblerBase { \
- public: \
- using Descriptor = Builtin_##Name##_InterfaceDescriptor; \
- \
- explicit Name##Assembler(compiler::CodeAssemblerState* state) \
- : AssemblerBase(state) {} \
- void Generate##Name##Impl(); \
- \
- Node* Parameter(Descriptor::ParameterIndices index) { \
- return CodeAssembler::Parameter(static_cast<int>(index)); \
- } \
- }; \
- void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
- Name##Assembler assembler(state); \
- state->SetInitialDebugInformation(#Name, __FILE__, __LINE__); \
- if (Builtins::KindOf(Builtins::k##Name) == Builtins::TFJ) { \
- assembler.PerformStackCheck(assembler.GetJSContextParameter()); \
- } \
- assembler.Generate##Name##Impl(); \
- } \
+#define TF_BUILTIN(Name, AssemblerBase) \
+ class Name##Assembler : public AssemblerBase { \
+ public: \
+ using Descriptor = Builtin_##Name##_InterfaceDescriptor; \
+ \
+ explicit Name##Assembler(compiler::CodeAssemblerState* state) \
+ : AssemblerBase(state) {} \
+ void Generate##Name##Impl(); \
+ \
+ template <class T> \
+ TNode<T> Parameter( \
+ Descriptor::ParameterIndices index, \
+ cppgc::SourceLocation loc = cppgc::SourceLocation::Current()) { \
+ return CodeAssembler::Parameter<T>(static_cast<int>(index), loc); \
+ } \
+ \
+ template <class T> \
+ TNode<T> UncheckedParameter(Descriptor::ParameterIndices index) { \
+ return CodeAssembler::UncheckedParameter<T>(static_cast<int>(index)); \
+ } \
+ }; \
+ void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
+ Name##Assembler assembler(state); \
+ state->SetInitialDebugInformation(#Name, __FILE__, __LINE__); \
+ if (Builtins::KindOf(Builtins::k##Name) == Builtins::TFJ) { \
+ assembler.PerformStackCheck(assembler.GetJSContextParameter()); \
+ } \
+ assembler.Generate##Name##Impl(); \
+ } \
void Name##Assembler::Generate##Name##Impl()
} // namespace internal
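
The macro change above is what enables the CAST(Parameter(...)) to Parameter<T>(...) migration seen throughout this diff: the typed accessor checks and wraps the untyped node in one step. A minimal sketch of the pattern, with made-up names (Node, TNode, ObjectTag and type_tag here are assumptions for illustration):

#include <cassert>
#include <utility>
#include <vector>

struct Node { int type_tag = 0; };

// A typed wrapper around an untyped node, standing in for a typed handle.
template <class T>
struct TNode { Node* node; };

class AssemblerSketch {
 public:
  explicit AssemblerSketch(std::vector<Node*> params)
      : params_(std::move(params)) {}

  // Old style: untyped accessor, call sites must cast the result themselves.
  Node* UntypedParameter(int index) const { return params_.at(index); }

  // New style: one accessor that checks the tag and returns a typed handle,
  // so call sites can simply write auto x = Parameter<SomeType>(i).
  template <class T>
  TNode<T> Parameter(int index) const {
    Node* n = params_.at(index);
    assert(n->type_tag == T::kTypeTag && "parameter has unexpected type");
    return TNode<T>{n};
  }

 private:
  std::vector<Node*> params_;
};

// Example tag type usable as Parameter<ObjectTag>(0).
struct ObjectTag { static constexpr int kTypeTag = 1; };
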
diff --git a/deps/v8/src/builtins/builtins-utils-inl.h b/deps/v8/src/builtins/builtins-utils-inl.h
index 82d5fe2873..10f03a3d91 100644
--- a/deps/v8/src/builtins/builtins-utils-inl.h
+++ b/deps/v8/src/builtins/builtins-utils-inl.h
@@ -23,20 +23,12 @@ Handle<Object> BuiltinArguments::atOrUndefined(Isolate* isolate,
Handle<Object> BuiltinArguments::receiver() const { return at<Object>(0); }
Handle<JSFunction> BuiltinArguments::target() const {
-#ifdef V8_REVERSE_JSARGS
int index = kTargetOffset;
-#else
- int index = Arguments::length() - 1 - kTargetOffset;
-#endif
return Handle<JSFunction>(address_of_arg_at(index));
}
Handle<HeapObject> BuiltinArguments::new_target() const {
-#ifdef V8_REVERSE_JSARGS
int index = kNewTargetOffset;
-#else
- int index = Arguments::length() - 1 - kNewTargetOffset;
-#endif
return Handle<JSFunction>(address_of_arg_at(index));
}
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 3bed3bc651..e5f420a20d 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -52,12 +52,7 @@ class BuiltinArguments : public JavaScriptArguments {
static constexpr int kNumExtraArgs = 4;
static constexpr int kNumExtraArgsWithReceiver = 5;
-
-#ifdef V8_REVERSE_JSARGS
static constexpr int kArgsOffset = 4;
-#else
- static constexpr int kArgsOffset = 0;
-#endif
inline Handle<Object> atOrUndefined(Isolate* isolate, int index) const;
inline Handle<Object> receiver() const;
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index d4e92d165d..a996161e2f 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -44,12 +44,12 @@ TNode<FixedArray> WasmBuiltinsAssembler::LoadManagedObjectMapsFromInstance(
}
TF_BUILTIN(WasmFloat32ToNumber, WasmBuiltinsAssembler) {
- TNode<Float32T> val = UncheckedCast<Float32T>(Parameter(Descriptor::kValue));
+ auto val = UncheckedParameter<Float32T>(Descriptor::kValue);
Return(ChangeFloat32ToTagged(val));
}
TF_BUILTIN(WasmFloat64ToNumber, WasmBuiltinsAssembler) {
- TNode<Float64T> val = UncheckedCast<Float64T>(Parameter(Descriptor::kValue));
+ auto val = UncheckedParameter<Float64T>(Descriptor::kValue);
Return(ChangeFloat64ToTagged(val));
}
@@ -59,18 +59,14 @@ TF_BUILTIN(WasmI32AtomicWait32, WasmBuiltinsAssembler) {
return;
}
- TNode<Uint32T> address =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
+ auto address = UncheckedParameter<Uint32T>(Descriptor::kAddress);
TNode<Number> address_number = ChangeUint32ToTagged(address);
- TNode<Int32T> expected_value =
- UncheckedCast<Int32T>(Parameter(Descriptor::kExpectedValue));
+ auto expected_value = UncheckedParameter<Int32T>(Descriptor::kExpectedValue);
TNode<Number> expected_value_number = ChangeInt32ToTagged(expected_value);
- TNode<IntPtrT> timeout_low =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutLow));
- TNode<IntPtrT> timeout_high =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutHigh));
+ auto timeout_low = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutLow);
+ auto timeout_high = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutHigh);
TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
@@ -88,21 +84,18 @@ TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) {
return;
}
- TNode<Uint32T> address =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
+ auto address = UncheckedParameter<Uint32T>(Descriptor::kAddress);
TNode<Number> address_number = ChangeUint32ToTagged(address);
- TNode<IntPtrT> expected_value_low =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValueLow));
- TNode<IntPtrT> expected_value_high =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValueHigh));
+ auto expected_value_low =
+ UncheckedParameter<IntPtrT>(Descriptor::kExpectedValueLow);
+ auto expected_value_high =
+ UncheckedParameter<IntPtrT>(Descriptor::kExpectedValueHigh);
TNode<BigInt> expected_value =
BigIntFromInt32Pair(expected_value_low, expected_value_high);
- TNode<IntPtrT> timeout_low =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutLow));
- TNode<IntPtrT> timeout_high =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutHigh));
+ auto timeout_low = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutLow);
+ auto timeout_high = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutHigh);
TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
@@ -115,9 +108,9 @@ TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) {
}
TF_BUILTIN(WasmAllocateArrayWithRtt, WasmBuiltinsAssembler) {
- TNode<Map> map = CAST(Parameter(Descriptor::kMap));
- TNode<Smi> length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> element_size = CAST(Parameter(Descriptor::kElementSize));
+ auto map = Parameter<Map>(Descriptor::kMap);
+ auto length = Parameter<Smi>(Descriptor::kLength);
+ auto element_size = Parameter<Smi>(Descriptor::kElementSize);
TNode<IntPtrT> untagged_length = SmiUntag(length);
// instance_size = WasmArray::kHeaderSize
// + RoundUp(element_size * length, kObjectAlignment)
@@ -134,5 +127,27 @@ TF_BUILTIN(WasmAllocateArrayWithRtt, WasmBuiltinsAssembler) {
Return(result);
}
+TF_BUILTIN(WasmAllocatePair, WasmBuiltinsAssembler) {
+ TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
+ TNode<HeapObject> value1 = Parameter<HeapObject>(Descriptor::kValue1);
+ TNode<HeapObject> value2 = Parameter<HeapObject>(Descriptor::kValue2);
+
+ TNode<IntPtrT> roots = LoadObjectField<IntPtrT>(
+ instance, WasmInstanceObject::kIsolateRootOffset);
+ TNode<Map> map = CAST(Load(
+ MachineType::AnyTagged(), roots,
+ IntPtrConstant(IsolateData::root_slot_offset(RootIndex::kTuple2Map))));
+
+ TNode<IntPtrT> instance_size =
+ TimesTaggedSize(LoadMapInstanceSizeInWords(map));
+ TNode<Tuple2> result = UncheckedCast<Tuple2>(Allocate(instance_size));
+
+ StoreMap(result, map);
+ StoreObjectField(result, Tuple2::kValue1Offset, value1);
+ StoreObjectField(result, Tuple2::kValue2Offset, value2);
+
+ Return(result);
+}
+
} // namespace internal
} // namespace v8
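
In the 32-bit wait builtins above, 64-bit expected values and timeouts arrive as two untagged 32-bit halves and BigIntFromInt32Pair recombines them. A plain C++ equivalent of that recombination (the helper name is illustrative):

#include <cstdint>

// Recombine a 64-bit value passed as two untagged 32-bit halves.
inline int64_t Int64FromInt32Pair(uint32_t low, uint32_t high) {
  return static_cast<int64_t>((static_cast<uint64_t>(high) << 32) |
                              static_cast<uint64_t>(low));
}
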
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 31682f3974..541f9ffac9 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -335,7 +335,8 @@ class OffHeapTrampolineGenerator {
public:
explicit OffHeapTrampolineGenerator(Isolate* isolate)
: isolate_(isolate),
- masm_(isolate, CodeObjectRequired::kYes,
+ masm_(isolate, AssemblerOptions::DefaultForOffHeapTrampoline(isolate),
+ CodeObjectRequired::kYes,
ExternalAssemblerBuffer(buffer_, kBufferSize)) {}
CodeDesc Generate(Address off_heap_entry, TrampolineType type) {
@@ -347,6 +348,7 @@ class OffHeapTrampolineGenerator {
masm_.CodeEntry();
masm_.JumpToInstructionStream(off_heap_entry);
} else {
+ DCHECK_EQ(type, TrampolineType::kAbort);
masm_.Trap();
}
}
@@ -484,6 +486,7 @@ bool Builtins::CodeObjectIsExecutable(int builtin_index) {
case Builtins::kArgumentsAdaptorTrampoline:
case Builtins::kHandleApiCall:
case Builtins::kInstantiateAsmJs:
+ case Builtins::kGenericJSToWasmWrapper:
// TODO(delphick): Remove this when calls to it have the trampoline inlined
// or are converted to use kCallBuiltinPointer.
diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq
index 1562b7b4dd..362086f879 100644
--- a/deps/v8/src/builtins/cast.tq
+++ b/deps/v8/src/builtins/cast.tq
@@ -158,6 +158,7 @@ macro Cast<A : type extends Object>(implicit context: Context)(o: Object): A
otherwise CastError;
}
+// This is required for casting MaybeObject to Object.
Cast<Smi>(o: Object): Smi
labels CastError {
return TaggedToSmi(o) otherwise CastError;
@@ -667,7 +668,19 @@ UnsafeCast<RegExpMatchInfo>(implicit context: Context)(o: Object):
return %RawDownCast<RegExpMatchInfo>(o);
}
-macro CastOrDefault<T: type, Arg: type, Default: type>(
- implicit context: Context)(x: Arg, default: Default): T|Default {
+macro UnsafeCast<A : type extends WeakHeapObject>(o: A|Object): A {
+ assert(IsWeakOrCleared(o));
+ return %RawDownCast<A>(o);
+}
+
+macro
+CastOrDefault<T: type, Arg: type, Default: type>(implicit context: Context)(
+ x: Arg, default: Default): T|Default {
return Cast<T>(x) otherwise return default;
}
+
+// This is required for casting MaybeObject to Object.
+Cast<Object>(o: Object): Object
+labels _CastError {
+ return o;
+}
diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc
index 97565f2e37..25fa878634 100644
--- a/deps/v8/src/builtins/constants-table-builder.cc
+++ b/deps/v8/src/builtins/constants-table-builder.cc
@@ -46,15 +46,12 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
DCHECK(!object->IsCode());
#endif
- uint32_t* maybe_key = map_.Find(object);
- if (maybe_key == nullptr) {
+ auto find_result = map_.FindOrInsert(object);
+ if (!find_result.already_exists) {
DCHECK(object->IsHeapObject());
- uint32_t index = map_.size();
- map_.Set(object, index);
- return index;
- } else {
- return *maybe_key;
+ *find_result.entry = map_.size() - 1;
}
+ return *find_result.entry;
}
namespace {
@@ -85,7 +82,7 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
uint32_t key;
if (map_.Delete(self_reference, &key)) {
DCHECK(code_object->IsCode());
- map_.Set(code_object, key);
+ map_.Insert(code_object, key);
}
}
@@ -96,7 +93,7 @@ void BuiltinsConstantsTableBuilder::PatchBasicBlockCountersReference(
uint32_t key;
if (map_.Delete(ReadOnlyRoots(isolate_).basic_block_counters_marker(),
&key)) {
- map_.Set(counters, key);
+ map_.Insert(counters, key);
}
}
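
The change above folds a Find-then-Set sequence into a single FindOrInsert lookup. A standalone sketch of the same pattern using std::unordered_map::try_emplace; the class and method names are illustrative and do not reproduce V8's identity-map API:

#include <cstdint>
#include <unordered_map>

class ConstantsIndexerSketch {
 public:
  // Returns the index of |object|, assigning the next free index on first use.
  uint32_t AddObject(const void* object) {
    auto next_index = static_cast<uint32_t>(map_.size());
    // try_emplace inserts {object, next_index} only if the key is absent, so
    // a repeated object keeps its original index: one lookup, no second Set.
    return map_.try_emplace(object, next_index).first->second;
  }

 private:
  std::unordered_map<const void*, uint32_t> map_;
};
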
diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq
index 6ac9901028..6f6cbb1f68 100644
--- a/deps/v8/src/builtins/convert.tq
+++ b/deps/v8/src/builtins/convert.tq
@@ -115,6 +115,11 @@ FromConstexpr<IterationKind, constexpr IterationKind>(
return %RawDownCast<IterationKind>(Unsigned(%FromConstexpr<int32>(c)));
}
+FromConstexpr<string::TrimMode, string::constexpr TrimMode>(
+ c: string::constexpr TrimMode): string::TrimMode {
+ return %RawDownCast<string::TrimMode>(Unsigned(%FromConstexpr<int32>(c)));
+}
+
macro Convert<To: type, From: type>(i: From): To {
return i;
}
diff --git a/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc b/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc
index 8266807b43..7317402fd5 100644
--- a/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc
+++ b/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc
@@ -11,6 +11,9 @@ namespace v8 {
namespace internal {
namespace interpreter {
+const int kIllegalBytecodeHandler = -1;
+const int kIllegalBytecodeHandlerEncoding = 255;
+
void WriteBytecode(std::ofstream& out, Bytecode bytecode,
OperandScale operand_scale, int* count, int offset_table[],
int table_index) {
@@ -22,7 +25,7 @@ void WriteBytecode(std::ofstream& out, Bytecode bytecode,
offset_table[table_index] = *count;
(*count)++;
} else {
- offset_table[table_index] = -1;
+ offset_table[table_index] = kIllegalBytecodeHandler;
}
}
@@ -32,6 +35,7 @@ void WriteHeader(const char* header_filename) {
out << "// Automatically generated from interpreter/bytecodes.h\n"
<< "// The following list macro is used to populate the builtins list\n"
<< "// with the bytecode handlers\n\n"
+ << "#include <stdint.h>\n\n"
<< "#ifndef V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n"
<< "#define V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n\n"
<< "namespace v8 {\n"
@@ -60,19 +64,25 @@ void WriteHeader(const char* header_filename) {
CHECK_GT(single_count, wide_count);
CHECK_EQ(single_count, Bytecodes::kBytecodeCount);
CHECK_EQ(wide_count, extra_wide_count);
- out << "\n\nconst int kNumberOfBytecodeHandlers = " << single_count << ";\n"
- << "const int kNumberOfWideBytecodeHandlers = " << wide_count << ";\n\n"
- << "// Mapping from (Bytecode + OperandScaleAsIndex * |Bytecodes|) to\n"
- << "// a dense form with all the illegal Bytecode/OperandScale\n"
- << "// combinations removed. Used to index into the builtins table.\n"
- << "constexpr int kBytecodeToBuiltinsMapping[" << kTableSize << "] = {\n"
- << " ";
+ out << "\n\nconstexpr int kNumberOfBytecodeHandlers = " << single_count
+ << ";\n"
+ << "constexpr int kNumberOfWideBytecodeHandlers = " << wide_count
+ << ";\n\n"
+ << "constexpr uint8_t kIllegalBytecodeHandlerEncoding = "
+ << kIllegalBytecodeHandlerEncoding << ";\n\n"
+ << "// Mapping from Bytecode to a dense form with all the illegal\n"
+ << "// wide Bytecodes removed. Used to index into the builtins table.\n"
+ << "constexpr uint8_t kWideBytecodeToBuiltinsMapping["
+ << "kNumberOfBytecodeHandlers] = { \n";
- for (int i = 0; i < kTableSize; ++i) {
- if (i == single_count || i == 2 * single_count) {
- out << "\n ";
+ for (int i = single_count; i < 2 * single_count; ++i) {
+ int offset = offset_table[i];
+ if (offset == kIllegalBytecodeHandler) {
+ offset = kIllegalBytecodeHandlerEncoding;
+ } else {
+ offset -= single_count;
}
- out << offset_table[i] << ", ";
+ out << offset << ", ";
}
out << "};\n\n"
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 41181410b5..d5f82cd3d9 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -72,48 +72,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
-
-void CompareStackLimit(MacroAssembler* masm, Register with,
- StackLimitKind kind) {
- DCHECK(masm->root_array_available());
- Isolate* isolate = masm->isolate();
- // Address through the root register. No load is needed.
- ExternalReference limit =
- kind == StackLimitKind::kRealStackLimit
- ? ExternalReference::address_of_real_jslimit(isolate)
- : ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
-
- intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
- __ cmp(with, Operand(kRootRegister, offset));
-}
-
-void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch, Label* stack_overflow,
- bool include_receiver = false) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_jslimit(masm->isolate());
- // Compute the space that is left as a negative number in scratch. If
- // we already overflowed, this will be a positive number.
- __ mov(scratch, __ ExternalReferenceAsOperand(real_stack_limit, scratch));
- __ sub(scratch, esp);
- // Add the size of the arguments.
- static_assert(kSystemPointerSize == 4,
- "The next instruction assumes kSystemPointerSize == 4");
- __ lea(scratch, Operand(scratch, num_args, times_system_pointer_size, 0));
- if (include_receiver) {
- __ add(scratch, Immediate(kSystemPointerSize));
- }
- // See if we overflowed, i.e. scratch is positive.
- __ cmp(scratch, Immediate(0));
- __ j(greater, stack_overflow); // Signed comparison.
-}
-
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax: number of arguments
@@ -124,7 +82,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
Label stack_overflow;
- Generate_StackOverflowCheck(masm, eax, ecx, &stack_overflow);
+ __ StackOverflowCheck(eax, ecx, &stack_overflow);
// Enter a construct frame.
{
@@ -136,7 +94,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ push(eax);
__ SmiUntag(eax);
-#ifdef V8_REVERSE_JSARGS
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments into their
+ // correct positions (including any undefined), instead of delaying this to
+ // InvokeFunction.
+
// Set up pointer to first argument (skip receiver).
__ lea(esi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
@@ -144,14 +106,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushArray(esi, eax, ecx);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-#else
- // The receiver for the builtin/api call.
- __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument. We are using esi as scratch register.
- __ lea(esi, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
- // Copy arguments to the expression stack.
- __ PushArray(esi, eax, ecx);
-#endif
// Call the function.
// eax: number of arguments (untagged)
@@ -196,168 +150,133 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
+ FrameScope scope(masm, StackFrame::MANUAL);
// Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
- Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+ __ EnterFrame(StackFrame::CONSTRUCT);
- // Preserve the incoming parameters on the stack.
- __ mov(ecx, eax);
- __ SmiTag(ecx);
- __ Push(esi);
- __ Push(ecx);
- __ Push(edi);
- __ PushRoot(RootIndex::kTheHoleValue);
- __ Push(edx);
-
- // ----------- S t a t e -------------
- // -- sp[0*kSystemPointerSize]: new target
- // -- sp[1*kSystemPointerSize]: padding
- // -- edi and sp[2*kSystemPointerSize]: constructor function
- // -- sp[3*kSystemPointerSize]: argument count
- // -- sp[4*kSystemPointerSize]: context
- // -----------------------------------
-
- __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kFlagsOffset));
- __ DecodeField<SharedFunctionInfo::FunctionKindBits>(eax);
- __ JumpIfIsInRange(eax, kDefaultDerivedConstructor, kDerivedConstructor,
- ecx, &not_create_implicit_receiver, Label::kNear);
-
- // If not derived class constructor: Allocate the new receiver object.
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
- eax);
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ jmp(&post_instantiation_deopt_entry, Label::kNear);
-
- // Else: use TheHoleValue as receiver for constructor call
- __ bind(&not_create_implicit_receiver);
- __ LoadRoot(eax, RootIndex::kTheHoleValue);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
- // ----------- S t a t e -------------
- // -- eax: implicit receiver
- // -- Slot 4 / sp[0*kSystemPointerSize]: new target
- // -- Slot 3 / sp[1*kSystemPointerSize]: padding
- // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
- // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[4*kSystemPointerSize]: context
- // -----------------------------------
- // Deoptimizer enters here.
- masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
- masm->pc_offset());
- __ bind(&post_instantiation_deopt_entry);
+ // Preserve the incoming parameters on the stack.
+ __ mov(ecx, eax);
+ __ SmiTag(ecx);
+ __ Push(esi);
+ __ Push(ecx);
+ __ Push(edi);
+ __ PushRoot(RootIndex::kTheHoleValue);
+ __ Push(edx);
- // Restore new target.
- __ Pop(edx);
+ // ----------- S t a t e -------------
+ // -- sp[0*kSystemPointerSize]: new target
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- edi and sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: argument count
+ // -- sp[4*kSystemPointerSize]: context
+ // -----------------------------------
- // Push the allocated receiver to the stack.
- __ Push(eax);
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kFlagsOffset));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(eax);
+ __ JumpIfIsInRange(eax, kDefaultDerivedConstructor, kDerivedConstructor, ecx,
+ &not_create_implicit_receiver, Label::kNear);
-#ifdef V8_REVERSE_JSARGS
- // We need two copies because we may have to return the original one
- // and the calling conventions dictate that the called function pops the
- // receiver. The second copy is pushed after the arguments, we saved in r8
- // since rax needs to store the number of arguments before
- // InvokingFunction.
- __ movd(xmm0, eax);
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
+ eax);
+ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
+ __ jmp(&post_instantiation_deopt_entry, Label::kNear);
- // Set up pointer to first argument (skip receiver).
- __ lea(edi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
- kSystemPointerSize));
-#else
- // We need two copies because we may have to return the original one
- // and the calling conventions dictate that the called function pops the
- // receiver.
- __ Push(eax);
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(eax, RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ lea(edi, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-#endif
-
- // Restore argument count.
- __ mov(eax, Operand(ebp, ConstructFrameConstants::kLengthOffset));
- __ SmiUntag(eax);
+ // ----------- S t a t e -------------
+ // -- eax: implicit receiver
+ // -- Slot 4 / sp[0*kSystemPointerSize]: new target
+ // -- Slot 3 / sp[1*kSystemPointerSize]: padding
+ // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kSystemPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
- // Check if we have enough stack space to push all arguments.
- // Argument count in eax. Clobbers ecx.
- Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, eax, ecx, &stack_overflow);
- __ jmp(&enough_stack_space);
+ // Restore new target.
+ __ Pop(edx);
- __ bind(&stack_overflow);
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kThrowStackOverflow);
- // This should be unreachable.
- __ int3();
+ // Push the allocated receiver to the stack.
+ __ Push(eax);
- __ bind(&enough_stack_space);
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+ // receiver. The second copy is pushed after the arguments; we save it in
+ // xmm0 since eax needs to hold the number of arguments before
+ // InvokeFunction.
+ __ movd(xmm0, eax);
- // Copy arguments to the expression stack.
- __ PushArray(edi, eax, ecx);
+ // Set up pointer to first argument (skip receiver).
+ __ lea(edi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
+ kSystemPointerSize));
-#ifdef V8_REVERSE_JSARGS
- // Push implicit receiver.
- __ movd(ecx, xmm0);
- __ Push(ecx);
-#endif
+ // Restore argument count.
+ __ mov(eax, Operand(ebp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(eax);
- // Restore and and call the constructor function.
- __ mov(edi, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
- __ InvokeFunction(edi, edx, eax, CALL_FUNCTION);
+ // Check if we have enough stack space to push all arguments.
+ // Argument count in eax. Clobbers ecx.
+ Label stack_overflow;
+ __ StackOverflowCheck(eax, ecx, &stack_overflow);
- // ----------- S t a t e -------------
- // -- eax: constructor result
- // -- sp[0*kSystemPointerSize]: implicit receiver
- // -- sp[1*kSystemPointerSize]: padding
- // -- sp[2*kSystemPointerSize]: constructor function
- // -- sp[3*kSystemPointerSize]: number of arguments
- // -- sp[4*kSystemPointerSize]: context
- // -----------------------------------
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments in their
+ // correct positions (including any undefined), instead of delaying this to
+ // InvokeFunction.
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
+ // Copy arguments to the expression stack.
+ __ PushArray(edi, eax, ecx);
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
+ // Push implicit receiver.
+ __ movd(ecx, xmm0);
+ __ Push(ecx);
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, do_throw, leave_frame;
+ // Restore and call the constructor function.
+ __ mov(edi, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
+ __ InvokeFunction(edi, edx, eax, CALL_FUNCTION);
- // If the result is undefined, we jump out to using the implicit receiver.
- __ JumpIfRoot(eax, RootIndex::kUndefinedValue, &use_receiver, Label::kNear);
+ // ----------- S t a t e -------------
+ // -- eax: constructor result
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments
+ // -- sp[4*kSystemPointerSize]: context
+ // -----------------------------------
- // Otherwise we do a smi check and fall through to check if the return value
- // is a valid receiver.
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver, Label::kNear);
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
- __ j(above_equal, &leave_frame, Label::kNear);
- __ jmp(&use_receiver, Label::kNear);
+ Label check_result, use_receiver, do_throw, leave_and_return;
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfNotRoot(eax, RootIndex::kUndefinedValue, &check_result,
+ Label::kNear);
- __ bind(&do_throw);
- __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ mov(eax, Operand(esp, 0 * kSystemPointerSize));
+ __ JumpIfRoot(eax, RootIndex::kTheHoleValue, &do_throw);
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0 * kSystemPointerSize));
- __ JumpIfRoot(eax, RootIndex::kTheHoleValue, &do_throw);
+ __ bind(&leave_and_return);
+ // Restore smi-tagged arguments count from the frame.
+ __ mov(edx, Operand(ebp, ConstructFrameConstants::kLengthOffset));
+ __ LeaveFrame(StackFrame::CONSTRUCT);
- __ bind(&leave_frame);
- // Restore smi-tagged arguments count from the frame.
- __ mov(edx, Operand(ebp, ConstructFrameConstants::kLengthOffset));
- // Leave construct frame.
- }
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ pop(ecx);
@@ -365,6 +284,34 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
1 * kSystemPointerSize)); // 1 ~ receiver
__ push(ecx);
__ ret(0);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+ __ bind(&check_result);
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(eax, &use_receiver, Label::kNear);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(above_equal, &leave_and_return, Label::kNear);
+ __ jmp(&use_receiver, Label::kNear);
+
+ __ bind(&do_throw);
+ // Restore context from the frame.
+ __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ // This should be unreachable.
+ __ int3();
+
+ __ bind(&stack_overflow);
+ // Restore context from the frame.
+ __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // This should be unreachable.
+ __ int3();
}
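The check_result / use_receiver / do_throw / leave_and_return labels above implement the ECMA-262 rule for what a [[Construct]] call returns. A hedged C++ paraphrase of that selection, with placeholder types rather than V8's object model:

// Placeholder value descriptor; the real code inspects tagged V8 objects.
struct ValueInfo {
  bool is_undefined;
  bool is_smi;
  bool is_js_receiver;  // an object in the ECMA sense
  bool is_the_hole;
};

// Returns true when the constructor's own result is used; otherwise the
// on-stack (implicit) receiver is used, and *should_throw is set when that
// receiver is still the hole (a derived constructor returned a non-object).
bool UseConstructorResult(const ValueInfo& result, const ValueInfo& receiver,
                          bool* should_throw) {
  *should_throw = false;
  if (!result.is_undefined && !result.is_smi && result.is_js_receiver) {
    return true;  // leave_and_return path
  }
  if (receiver.is_the_hole) {
    *should_throw = true;  // do_throw path: ThrowConstructorReturnedNonObject
  }
  return false;  // use_receiver path
}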
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
@@ -528,11 +475,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the function.
__ push(Operand(scratch1, EntryFrameConstants::kFunctionArgOffset));
-#ifndef V8_REVERSE_JSARGS
- // And the receiver onto the stack.
- __ push(Operand(scratch1, EntryFrameConstants::kReceiverArgOffset));
-#endif
-
// Load the number of arguments and setup pointer to the arguments.
__ mov(eax, Operand(scratch1, EntryFrameConstants::kArgcOffset));
__ mov(scratch1, Operand(scratch1, EntryFrameConstants::kArgvOffset));
@@ -540,7 +482,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
// Argument count in eax. Clobbers ecx.
Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, eax, ecx, &stack_overflow);
+ __ StackOverflowCheck(eax, ecx, &stack_overflow);
__ jmp(&enough_stack_space);
__ bind(&stack_overflow);
@@ -551,7 +493,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&enough_stack_space);
// Copy arguments to the stack in a loop.
-#ifdef V8_REVERSE_JSARGS
Label loop, entry;
__ Move(ecx, eax);
__ jmp(&entry, Label::kNear);
@@ -562,27 +503,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
-#else
- Label loop, entry;
- __ Move(ecx, Immediate(0));
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- // Push the parameter from argv.
- __ mov(scratch2, Operand(scratch1, ecx, times_system_pointer_size, 0));
- __ push(Operand(scratch2, 0)); // dereference handle
- __ inc(ecx);
- __ bind(&entry);
- __ cmp(ecx, eax);
- __ j(not_equal, &loop);
-#endif
// Load the previous frame pointer to access C arguments
__ mov(scratch2, Operand(ebp, 0));
-#ifdef V8_REVERSE_JSARGS
// Push the receiver onto the stack.
__ push(Operand(scratch2, EntryFrameConstants::kReceiverArgOffset));
-#endif
// Get the new.target and function from the frame.
__ mov(edx, Operand(scratch2, EntryFrameConstants::kNewTargetArgOffset));
@@ -667,23 +593,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- CompareStackLimit(masm, esp, StackLimitKind::kRealStackLimit);
+ __ CompareStackLimit(esp, StackLimitKind::kRealStackLimit);
__ j(below, &stack_overflow);
// Pop return address.
__ PopReturnAddressTo(eax);
-#ifndef V8_REVERSE_JSARGS
- // Push receiver.
- __ Push(FieldOperand(edx, JSGeneratorObject::kReceiverOffset));
-#endif
-
// ----------- S t a t e -------------
// -- eax : return address
// -- edx : the JSGeneratorObject to resume
// -- edi : generator function
// -- esi : generator context
- // -- esp[0] : generator receiver, if V8_REVERSE_JSARGS is not set
// -----------------------------------
{
@@ -695,7 +615,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ecx, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(ebx,
FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
-#ifdef V8_REVERSE_JSARGS
{
Label done_loop, loop;
__ mov(edi, ecx);
@@ -712,22 +631,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Push receiver.
__ Push(FieldOperand(edx, JSGeneratorObject::kReceiverOffset));
-#else
- {
- Label done_loop, loop;
- __ Set(edi, 0);
-
- __ bind(&loop);
- __ cmp(edi, ecx);
- __ j(greater_equal, &done_loop);
- __ Push(
- FieldOperand(ebx, edi, times_tagged_size, FixedArray::kHeaderSize));
- __ add(edi, Immediate(1));
- __ jmp(&loop);
-
- __ bind(&done_loop);
- }
-#endif
// Restore registers.
__ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
@@ -804,31 +707,47 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Register scratch2) {
- Register args_count = scratch1;
- Register return_pc = scratch2;
-
- // Get the arguments + receiver count.
- __ mov(args_count,
+ Register params_size = scratch1;
+ // Get the size of the formal parameters + receiver (in bytes).
+ __ mov(params_size,
Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ mov(args_count,
- FieldOperand(args_count, BytecodeArray::kParameterSizeOffset));
+ __ mov(params_size,
+ FieldOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ Register actual_params_size = scratch2;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ mov(actual_params_size, Operand(ebp, StandardFrameConstants::kArgCOffset));
+ __ lea(actual_params_size,
+ Operand(actual_params_size, times_system_pointer_size,
+ kSystemPointerSize));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label corrected_args_count;
+ __ cmp(params_size, actual_params_size);
+ __ j(greater_equal, &corrected_args_count, Label::kNear);
+ __ mov(params_size, actual_params_size);
+ __ bind(&corrected_args_count);
+#endif
// Leave the frame (also dropping the register file).
__ leave();
// Drop receiver + arguments.
- __ pop(return_pc);
- __ add(esp, args_count);
- __ push(return_pc);
+ Register return_pc = scratch2;
+ __ PopReturnAddressTo(return_pc);
+ __ add(esp, params_size);
+ __ PushReturnAddressFrom(return_pc);
}
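With V8_NO_ARGUMENTS_ADAPTOR, the code above drops whichever of the two sizes is larger so that extra actual arguments do not remain on the stack. A small sketch of that computation (names and the host pointer size are illustrative only):

#include <algorithm>
#include <cstddef>

constexpr std::size_t kSystemPointerSize = sizeof(void*);  // 4 on ia32

// formal_params_size_bytes corresponds to the BytecodeArray's parameter size
// (formal parameters + receiver, in bytes); actual_arg_count comes from
// StandardFrameConstants::kArgCOffset.
std::size_t BytesToDrop(std::size_t formal_params_size_bytes,
                        std::size_t actual_arg_count) {
  std::size_t actual_size_bytes = (actual_arg_count + 1) * kSystemPointerSize;
  return std::max(formal_params_size_bytes, actual_size_bytes);
}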
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ cmp(smi_entry, Immediate(Smi::FromEnum(marker)));
+ __ cmp(actual_marker, expected_marker);
__ j(not_equal, &no_match, Label::kNear);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -844,17 +763,22 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(edx, edi, optimized_code_entry));
Register closure = edi;
+ __ movd(xmm0, eax);
+ __ movd(xmm1, edx);
- __ push(edx);
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, bailout to a
// given label.
- Label found_deoptimized_code;
__ mov(eax,
FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &found_deoptimized_code);
+ __ j(not_zero, &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
@@ -862,14 +786,17 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
eax);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ LoadCodeObjectEntry(ecx, optimized_code_entry);
- __ pop(edx);
+ __ movd(edx, xmm1);
+ __ movd(eax, xmm0);
__ jmp(ecx);
- // Optimized code slot contains deoptimized code, evict it and re-enter
- // the closure's code.
- __ bind(&found_deoptimized_code);
- __ pop(edx);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ // The optimized code slot contains deoptimized code, or the code has been
+ // cleared while the optimization marker was not updated. Evict the code,
+ // update the marker and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ __ movd(edx, xmm1);
+ __ movd(eax, xmm0);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
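TailCallOptimizedCodeSlot above now folds the cleared-weak-reference case and the marked-for-deoptimization case into a single heal_optimized_code_slot exit. A hedged C++ paraphrase of that decision (the booleans stand in for the generated checks):

enum class OptimizedCodeAction { kTailCallOptimizedCode, kHealSlot };

OptimizedCodeAction DecideOptimizedCodeAction(bool weak_ref_cleared,
                                              bool marked_for_deoptimization) {
  if (weak_ref_cleared || marked_for_deoptimization) {
    // Evict the stale code, reset the marker and re-enter the closure's code
    // via Runtime::kHealOptimizedCodeSlot.
    return OptimizedCodeAction::kHealSlot;
  }
  // Otherwise install the optimized code in the closure and tail-call it.
  return OptimizedCodeAction::kTailCallOptimizedCode;
}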
static void MaybeOptimizeCode(MacroAssembler* masm,
@@ -895,15 +822,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- {
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
- if (FLAG_debug_code) {
- __ cmp(
- optimization_marker,
- Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
- }
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
+ if (FLAG_debug_code) {
+ __ int3();
}
}
@@ -1031,18 +954,22 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &push_stack_frame);
- // Read off the optimized code slot in the feedback vector.
- // Load the optimized code from the feedback vector and re-use the register.
- Register optimized_code_entry = ecx;
- __ mov(optimized_code_entry,
- FieldOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
-
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
- __ cmp(optimized_code_entry,
- Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
- __ j(not_equal, &optimized_code_slot_not_empty);
+ // Load the optimization state from the feedback vector and re-use the
+ // register.
+ Register optimization_state = ecx;
+ // Store feedback_vector. We may need it later to load the optimized code
+ // slot entry.
+ __ movd(xmm1, feedback_vector);
+ __ mov(optimization_state,
+ FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+
+ // Check if there is optimized code or an optimization marker that needs to
+ // be processed.
+ Label has_optimized_code_or_marker;
+ __ test(
+ optimization_state,
+ Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ j(not_zero, &has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
@@ -1108,7 +1035,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ mov(eax, esp);
__ sub(eax, frame_size);
- CompareStackLimit(masm, eax, StackLimitKind::kRealStackLimit);
+ __ CompareStackLimit(eax, StackLimitKind::kRealStackLimit);
__ j(below, &stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
@@ -1139,7 +1066,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- CompareStackLimit(masm, esp, StackLimitKind::kInterruptStackLimit);
+ __ CompareStackLimit(esp, StackLimitKind::kInterruptStackLimit);
__ j(below, &stack_check_interrupt);
__ bind(&after_stack_check_interrupt);
@@ -1214,21 +1141,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&optimized_code_slot_not_empty);
+ __ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
// Restore actual argument count.
__ movd(eax, xmm0);
- // Check if optimized code marker is actually a weak reference to the
- // optimized code as opposed to an optimization marker.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, optimized_code_entry);
+
+ // Check if optimized code is available
+ __ test(
+ optimization_state,
+ Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ j(zero, &maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = optimization_marker;
+ __ movd(optimized_code_entry, xmm1);
+ __ mov(
+ optimized_code_entry,
+ FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry);
__ bind(&compile_lazy);
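Taken together, the flag test at the top of the trampoline and the marker/code split above amount to the following dispatch. This is a C++ paraphrase for orientation only; the mask names mirror the generated code, but the bit values and helper functions below are assumptions.

#include <cstdint>

constexpr std::uint32_t kHasOptimizedCodeOrCompileOptimizedMarkerMask = 0x7;  // assumed
constexpr std::uint32_t kHasCompileOptimizedOrLogFirstExecutionMarker = 0x3;  // assumed

void MaybeOptimizeCode(std::uint32_t marker) { (void)marker; /* queue or request optimization */ }
void TailCallOptimizedCodeSlot() { /* enter the cached optimized code */ }

void DispatchOnOptimizationState(std::uint32_t optimization_state) {
  if ((optimization_state & kHasOptimizedCodeOrCompileOptimizedMarkerMask) == 0) {
    return;  // Nothing pending: keep running the bytecode.
  }
  if (optimization_state & kHasCompileOptimizedOrLogFirstExecutionMarker) {
    // A marker (LogFirstExecution / CompileOptimized / ...Concurrent) is set:
    // decode it and act on it.
    MaybeOptimizeCode(optimization_state &
                      kHasCompileOptimizedOrLogFirstExecutionMarker);
  } else {
    // Otherwise the feedback vector already holds optimized code: load it
    // from FeedbackVector::kMaybeOptimizedCodeOffset and tail-call it.
    TailCallOptimizedCodeSlot();
  }
}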
@@ -1253,19 +1188,11 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Label loop_header, loop_check;
__ jmp(&loop_check);
__ bind(&loop_header);
-#ifdef V8_REVERSE_JSARGS
__ Push(Operand(array_limit, 0));
__ bind(&loop_check);
__ add(array_limit, Immediate(kSystemPointerSize));
__ cmp(array_limit, start_address);
__ j(below_equal, &loop_header, Label::kNear);
-#else
- __ Push(Operand(start_address, 0));
- __ sub(start_address, Immediate(kSystemPointerSize));
- __ bind(&loop_check);
- __ cmp(start_address, array_limit);
- __ j(above, &loop_header, Label::kNear);
-#endif
}
// static
@@ -1285,16 +1212,13 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
const Register argv = ecx;
Label stack_overflow;
-
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ dec(eax);
}
-#endif
// Add a stack check before pushing the arguments.
- Generate_StackOverflowCheck(masm, eax, scratch, &stack_overflow, true);
+ __ StackOverflowCheck(eax, scratch, &stack_overflow, true);
__ movd(xmm0, eax); // Spill number of arguments.
@@ -1304,7 +1228,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(eax);
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode != ConvertReceiverMode::kNullOrUndefined) {
__ add(scratch, Immediate(1)); // Add one for receiver.
}
@@ -1328,34 +1251,12 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ PushRoot(RootIndex::kUndefinedValue);
}
-#else
- __ add(scratch, Immediate(1)); // Add one for receiver.
-
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(RootIndex::kUndefinedValue);
- __ sub(scratch, Immediate(1)); // Subtract one for receiver.
- }
-
- // Find the address of the last argument.
- __ shl(scratch, kSystemPointerSizeLog2);
- __ neg(scratch);
- __ add(scratch, argv);
- Generate_InterpreterPushArgs(masm, scratch, argv);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(ecx); // Pass the spread in a register
- }
-#endif
__ PushReturnAddressFrom(eax);
__ movd(eax, xmm0); // Restore number of arguments.
// Call the target.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
-#ifndef V8_REVERSE_JSARGS
- __ sub(eax, Immediate(1)); // Subtract one for spread
-#endif
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1395,7 +1296,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// | addtl. slot | | receiver slot |
// Check for stack overflow before we increment the stack pointer.
- Generate_StackOverflowCheck(masm, num_args, scratch1, stack_overflow, true);
+ __ StackOverflowCheck(num_args, scratch1, stack_overflow, true);
// Step 1 - Update the stack pointer.
@@ -1416,7 +1317,6 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// Step 3 copy arguments to correct locations.
// Slot meant for receiver contains return address. Reset it so that
// we will not incorrectly interpret return address as an object.
-#ifdef V8_REVERSE_JSARGS
__ mov(Operand(esp, (num_slots_to_move + 1) * kSystemPointerSize),
Immediate(0));
__ mov(scratch1, Immediate(0));
@@ -1433,26 +1333,6 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
__ inc(scratch1);
__ cmp(scratch1, eax);
__ j(less_equal, &loop_header, Label::kNear);
-
-#else
- __ mov(Operand(esp, num_args, times_system_pointer_size,
- (num_slots_to_move + 1) * kSystemPointerSize),
- Immediate(0));
- __ mov(scratch1, num_args);
-
- Label loop_header, loop_check;
- __ jmp(&loop_check);
- __ bind(&loop_header);
- __ mov(scratch2, Operand(start_addr, 0));
- __ mov(Operand(esp, scratch1, times_system_pointer_size,
- num_slots_to_move * kSystemPointerSize),
- scratch2);
- __ sub(start_addr, Immediate(kSystemPointerSize));
- __ sub(scratch1, Immediate(1));
- __ bind(&loop_check);
- __ cmp(scratch1, Immediate(0));
- __ j(greater, &loop_header, Label::kNear);
-#endif
}
} // anonymous namespace
@@ -1472,12 +1352,10 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// -----------------------------------
Label stack_overflow;
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ dec(eax);
}
-#endif
// Push arguments and move return address and stack spill slots to the top of
// stack. The eax register is readonly. The ecx register will be modified. edx
@@ -1513,17 +1391,10 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ Drop(1); // The allocation site is unused.
__ Pop(kJavaScriptCallNewTargetRegister);
__ Pop(kJavaScriptCallTargetRegister);
-#ifdef V8_REVERSE_JSARGS
// Pass the spread in the register ecx, overwriting ecx.
__ mov(ecx, Operand(ecx, 0));
__ PushReturnAddressFrom(eax);
__ movd(eax, xmm0); // Reload number of arguments.
-#else
- __ Pop(ecx); // Pop the spread (i.e. the first argument), overwriting ecx.
- __ PushReturnAddressFrom(eax);
- __ movd(eax, xmm0); // Reload number of arguments.
- __ sub(eax, Immediate(1)); // The actual argc thus decrements by one.
-#endif
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1680,7 +1551,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
const RegisterConfiguration* config(RegisterConfiguration::Default());
int allocatable_register_count = config->num_allocatable_general_registers();
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
// xmm0 is not included in the allocateable registers.
__ movd(xmm0, eax);
@@ -1693,14 +1563,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
BuiltinContinuationFrameConstants::kFixedFrameSize),
eax);
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ mov(Operand(esp, config->num_allocatable_general_registers() *
- kSystemPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize),
- eax);
-#endif
}
// Replace the builtin index Smi on the stack with the start address of the
@@ -1718,7 +1580,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ SmiUntag(Register::from_code(code));
}
}
-#ifdef V8_REVERSE_JSARGS
if (with_result && java_script_builtin) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. eax contains the arguments count, the return value
@@ -1727,7 +1588,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
BuiltinContinuationFrameConstants::kFixedFrameSize),
xmm0);
}
-#endif
__ mov(
ebp,
Operand(esp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1775,10 +1635,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
- // The order of args depends on V8_REVERSE_JSARGS
- // -- args[0] : receiver
- // -- args[1] : thisArg
- // -- args[2] : argArray
+ // -- esp[4] : receiver
+ // -- esp[8] : thisArg
+ // -- esp[12] : argArray
// -----------------------------------
// 1. Load receiver into xmm0, argArray into edx (if present), remove all
@@ -1845,15 +1704,13 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
// esp[0] : Return address
- // esp[8] : Argument n
- // esp[16] : Argument n-1
+ // esp[8] : Argument 0 (receiver: callable to call)
+ // esp[16] : Argument 1
// ...
- // esp[8 * n] : Argument 1
- // esp[8 * (n + 1)] : Receiver (callable to call)
- // NOTE: The order of args are reversed if V8_REVERSE_JSARGS
+ // esp[8 * n] : Argument n-1
+ // esp[8 * (n + 1)] : Argument n
// eax contains the number of arguments, n, not counting the receiver.
-#ifdef V8_REVERSE_JSARGS
// 1. Get the callable to call (passed as receiver) from the stack.
{
StackArgumentsAccessor args(eax);
@@ -1878,43 +1735,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// original callable), making the original first argument the new receiver.
__ PushReturnAddressFrom(edx);
__ dec(eax); // One fewer argument (first argument is new receiver).
-#else
- // 1. Make sure we have at least one argument.
- {
- Label done;
- __ test(eax, eax);
- __ j(not_zero, &done, Label::kNear);
- __ PopReturnAddressTo(edx);
- __ PushRoot(RootIndex::kUndefinedValue);
- __ PushReturnAddressFrom(edx);
- __ inc(eax);
- __ bind(&done);
- }
-
- // 2. Get the callable to call (passed as receiver) from the stack.
- {
- StackArgumentsAccessor args(eax);
- __ mov(edi, args.GetReceiverOperand());
- }
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- {
- Label loop;
- __ mov(ecx, eax);
- __ bind(&loop);
- __ mov(edx, Operand(esp, ecx, times_system_pointer_size, 0));
- __ mov(Operand(esp, ecx, times_system_pointer_size, kSystemPointerSize),
- edx);
- __ dec(ecx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(edx); // Discard copy of return address.
- __ dec(eax); // One fewer argument (first argument is new receiver).
- }
-#endif
-
- // 4. Call the callable.
+ // 5. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
@@ -1922,11 +1744,10 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
- // The order of args depends on V8_REVERSE_JSARGS
- // -- args[0] : receiver
- // -- args[1] : target
- // -- args[2] : thisArgument
- // -- args[3] : argumentsList
+ // -- esp[4] : receiver
+ // -- esp[8] : target (if argc >= 1)
+ // -- esp[12] : thisArgument (if argc >= 2)
+ // -- esp[16] : argumentsList (if argc == 3)
// -----------------------------------
// 1. Load target into edi (if present), argumentsList into edx (if present),
@@ -1981,11 +1802,10 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
- // The order of args depends on V8_REVERSE_JSARGS
- // -- args[0] : receiver
- // -- args[1] : target
- // -- args[2] : argumentsList
- // -- args[3] : new.target (optional)
+ // -- esp[4] : receiver
+ // -- esp[8] : target
+ // -- esp[12] : argumentsList
+ // -- esp[16] : new.target (optional)
// -----------------------------------
// 1. Load target into edi (if present), argumentsList into ecx (if present),
@@ -2126,9 +1946,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- Generate_StackOverflowCheck(masm, kArgumentsLength, edx, &stack_overflow);
+ __ StackOverflowCheck(kArgumentsLength, edx, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
__ movd(xmm4, kArgumentsList); // Spill the arguments list.
// Move the arguments already in the stack,
@@ -2177,29 +1996,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ jmp(&loop);
__ bind(&done);
}
-#else // !V8_REVERSE_JSARGS
- // Push additional arguments onto the stack.
- {
- __ PopReturnAddressTo(edx);
- __ Move(eax, Immediate(0));
- Label done, push, loop;
- __ bind(&loop);
- __ cmp(eax, kArgumentsLength);
- __ j(equal, &done, Label::kNear);
- // Turn the hole into undefined as we go.
- __ mov(edi, FieldOperand(kArgumentsList, eax, times_tagged_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(edi, RootIndex::kTheHoleValue);
- __ j(not_equal, &push, Label::kNear);
- __ LoadRoot(edi, RootIndex::kUndefinedValue);
- __ bind(&push);
- __ Push(edi);
- __ inc(eax);
- __ jmp(&loop);
- __ bind(&done);
- __ PushReturnAddressFrom(edx);
- }
-#endif // !V8_REVERSE_JSARGS
// Restore eax, edi and edx.
__ movd(esi, xmm3); // Restore the context.
@@ -2255,6 +2051,12 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ movd(xmm1, edx); // Preserve new.target (in case of [[Construct]]).
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
+ // code is erased.
+ __ mov(scratch, ebp);
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kArgCOffset));
+#else
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ mov(scratch, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2277,6 +2079,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ SmiUntag(edx);
}
__ bind(&arguments_done);
+#endif
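The #ifdef above is the key behavioural change in CallOrConstructForwardVarargs: with V8_NO_ARGUMENTS_ADAPTOR the actual argument count is read straight from the caller's standard frame instead of probing for an adaptor frame. A sketch of the two paths (the frame layout and marker value here are placeholders):

#include <cstdint>

struct Frame {
  Frame* caller_fp;
  std::intptr_t context_or_frame_type;  // CommonFrameConstants::kContextOrFrameTypeOffset
  std::intptr_t arg_count;              // StandardFrameConstants::kArgCOffset
  std::intptr_t adaptor_arg_count;      // adaptor frame length (smi-untagged here)
};

constexpr std::intptr_t kArgumentsAdaptorMarker = 4;  // placeholder, not the real marker

std::intptr_t CallerArgumentCount(Frame* fp, std::intptr_t formal_count,
                                  bool no_arguments_adaptor) {
  if (no_arguments_adaptor) {
    // New path: the count always lives in the caller's standard frame.
    return fp->arg_count;
  }
  // Legacy path: if an arguments adaptor frame sits below the function frame,
  // read its length; otherwise fall back to the formal parameter count.
  Frame* caller = fp->caller_fp;
  if (caller->context_or_frame_type == kArgumentsAdaptorMarker) {
    return caller->adaptor_arg_count;
  }
  return formal_count;
}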
Label stack_done, stack_overflow;
__ sub(edx, ecx);
@@ -2294,9 +2097,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -----------------------------------
// Forward the arguments from the caller frame.
-#ifdef V8_REVERSE_JSARGS
__ movd(xmm2, edi); // Preserve the target to call.
- Generate_StackOverflowCheck(masm, edx, edi, &stack_overflow);
+ __ StackOverflowCheck(edx, edi, &stack_overflow);
__ movd(xmm3, ebx); // Preserve root register.
Register scratch = ebx;
@@ -2350,20 +2152,6 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ movd(ebx, xmm3); // Restore root register.
__ movd(edi, xmm2); // Restore the target to call.
-#else
- Generate_StackOverflowCheck(masm, edx, ecx, &stack_overflow);
- Label loop;
- __ add(eax, edx);
- __ PopReturnAddressTo(ecx);
- __ bind(&loop);
- {
- __ dec(edx);
- __ Push(Operand(scratch, edx, times_system_pointer_size,
- kFPOnStackSize + kPCOnStackSize));
- __ j(not_zero, &loop);
- }
- __ PushReturnAddressFrom(ecx);
-#endif
}
__ bind(&stack_done);
@@ -2374,9 +2162,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
__ bind(&stack_overflow);
-#ifdef V8_REVERSE_JSARGS
__ movd(edi, xmm2); // Restore the target to call.
-#endif
__ movd(esi, xmm0); // Restore the context.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
}
@@ -2504,7 +2290,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SmiUntag(edx);
__ test(edx, edx);
__ j(zero, &no_bound_arguments);
-#ifdef V8_REVERSE_JSARGS
{
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
@@ -2517,7 +2302,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Check the stack for overflow.
{
Label done, stack_overflow;
- Generate_StackOverflowCheck(masm, edx, ecx, &stack_overflow);
+ __ StackOverflowCheck(edx, ecx, &stack_overflow);
__ jmp(&done);
__ bind(&stack_overflow);
{
@@ -2564,85 +2349,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Restore context.
__ movd(esi, xmm3);
}
-#else // !V8_REVERSE_JSARGS
- {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- xmm0 : new.target (only in case of [[Construct]])
- // -- edi : target (checked to be a JSBoundFunction)
- // -- ecx : the [[BoundArguments]] (implemented as FixedArray)
- // -- edx : the number of [[BoundArguments]]
- // -----------------------------------
-
- // Reserve stack space for the [[BoundArguments]].
- {
- Label done;
- __ lea(ecx, Operand(edx, times_system_pointer_size, 0));
- __ sub(esp, ecx); // Not Windows-friendly, but corrected below.
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack
- // limit".
- CompareStackLimit(masm, esp, StackLimitKind::kRealStackLimit);
- __ j(above_equal, &done, Label::kNear);
- // Restore the stack pointer.
- __ lea(esp, Operand(esp, edx, times_system_pointer_size, 0));
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- }
- __ bind(&done);
- }
-#if V8_OS_WIN
- // Correctly allocate the stack space that was checked above.
- {
- Label win_done;
- __ cmp(ecx, TurboAssemblerBase::kStackPageSize);
- __ j(less_equal, &win_done, Label::kNear);
- // Reset esp and walk through the range touching every page.
- __ lea(esp, Operand(esp, edx, times_system_pointer_size, 0));
- __ AllocateStackSpace(ecx);
- __ bind(&win_done);
- }
-#endif
-
- // Adjust effective number of arguments to include return address.
- __ inc(eax);
-
- // Relocate arguments and return address down the stack.
- {
- Label loop;
- __ Set(ecx, 0);
- __ lea(edx, Operand(esp, edx, times_system_pointer_size, 0));
- __ bind(&loop);
- __ movd(xmm1, Operand(edx, ecx, times_system_pointer_size, 0));
- __ movd(Operand(esp, ecx, times_system_pointer_size, 0), xmm1);
- __ inc(ecx);
- __ cmp(ecx, eax);
- __ j(less, &loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop;
- __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
- __ mov(edx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ SmiUntag(edx);
- __ bind(&loop);
- __ dec(edx);
- __ movd(xmm1, FieldOperand(ecx, edx, times_tagged_size,
- FixedArray::kHeaderSize));
- __ movd(Operand(esp, eax, times_system_pointer_size, 0), xmm1);
- __ lea(eax, Operand(eax, 1));
- __ j(greater, &loop);
- }
-
- // Adjust effective number of arguments (eax contains the number of
- // arguments from the call plus return address plus the number of
- // [[BoundArguments]]), so we need to subtract one for the return address.
- __ dec(eax);
- }
-#endif // !V8_REVERSE_JSARGS
__ bind(&no_bound_arguments);
__ movd(edx, xmm0); // Reload edx.
@@ -2865,16 +2571,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
EnterArgumentsAdaptorFrame(masm);
// edi is used as a scratch register. It should be restored from the frame
// when needed.
- Generate_StackOverflowCheck(masm, kExpectedNumberOfArgumentsRegister, edi,
- &stack_overflow);
+ __ StackOverflowCheck(kExpectedNumberOfArgumentsRegister, edi,
+ &stack_overflow);
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
-#ifdef V8_REVERSE_JSARGS
__ lea(edi, Operand(ebp, ecx, times_system_pointer_size, offset));
-#else
- __ lea(edi, Operand(ebp, eax, times_system_pointer_size, offset));
-#endif
__ mov(eax, -1); // account for receiver
Label copy;
@@ -2893,13 +2595,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
EnterArgumentsAdaptorFrame(masm);
// edi is used as a scratch register. It should be restored from the frame
// when needed.
- Generate_StackOverflowCheck(masm, kExpectedNumberOfArgumentsRegister, edi,
- &stack_overflow);
+ __ StackOverflowCheck(kExpectedNumberOfArgumentsRegister, edi,
+ &stack_overflow);
// Remember expected arguments in xmm0.
__ movd(xmm0, kExpectedNumberOfArgumentsRegister);
-#ifdef V8_REVERSE_JSARGS
// Remember new target.
__ movd(xmm1, edx);
@@ -2927,32 +2628,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Restore new.target
__ movd(edx, xmm1);
-#else // !V8_REVERSE_JSARGS
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(edi, Operand(ebp, eax, times_system_pointer_size, offset));
- // ecx = expected - actual.
- __ sub(kExpectedNumberOfArgumentsRegister, eax);
- // eax = -actual - 1
- __ neg(eax);
- __ sub(eax, Immediate(1));
-
- Label copy;
- __ bind(&copy);
- __ inc(eax);
- __ push(Operand(edi, 0));
- __ sub(edi, Immediate(kSystemPointerSize));
- __ test(eax, eax);
- __ j(not_zero, &copy);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ bind(&fill);
- __ inc(eax);
- __ Push(Immediate(masm->isolate()->factory()->undefined_value()));
- __ cmp(eax, kExpectedNumberOfArgumentsRegister);
- __ j(less, &fill);
-#endif // !V8_REVERSE_JSARGS
// Restore expected arguments.
__ movd(eax, xmm0);
@@ -3539,12 +3214,12 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- eax : call data
// -- edi : holder
// -- esp[0] : return address
- // -- esp[4] : last argument
+ // -- esp[4] : argument 0 (receiver)
+ // -- esp[8] : argument 1
// -- ...
- // -- esp[argc * 4] : first argument
- // -- esp[(argc + 1) * 4] : receiver
+ // -- esp[argc * 4] : argument (argc - 1)
+ // -- esp[(argc + 1) * 4] : argument argc
// -----------------------------------
- // NOTE: The order of args are reversed if V8_REVERSE_JSARGS
Register api_function_address = edx;
Register argc = ecx;
@@ -3614,13 +3289,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ lea(scratch,
Operand(scratch, (FCA::kArgsLength + 1) * kSystemPointerSize));
-#else
- __ lea(scratch, Operand(scratch, argc, times_system_pointer_size,
- (FCA::kArgsLength - 1) * kSystemPointerSize));
-#endif
__ mov(ApiParameterOperand(kApiArgc + 1), scratch);
// FunctionCallbackInfo::length_.
@@ -4118,6 +3788,205 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
MemMoveEmitPopAndReturn(masm);
}
+namespace {
+
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Save all general purpose registers before messing with them.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
+ __ AllocateStackSpace(kDoubleRegsSize);
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ movsd(Operand(esp, offset), xmm_reg);
+ }
+
+ __ pushad();
+
+ ExternalReference c_entry_fp_address =
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate);
+ __ mov(masm->ExternalReferenceAsOperand(c_entry_fp_address, esi), ebp);
+
+ const int kSavedRegistersAreaSize =
+ kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
+
+ // Get the address of the location in the code object
+ // and compute the fp-to-sp delta in register edx.
+ __ mov(ecx, Operand(esp, kSavedRegistersAreaSize));
+ __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kSystemPointerSize));
+
+ __ sub(edx, ebp);
+ __ neg(edx);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6, eax);
+ __ mov(eax, Immediate(0));
+ Label context_check;
+ __ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(edi, &context_check);
+ __ mov(eax, Operand(ebp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ mov(Operand(esp, 0 * kSystemPointerSize), eax); // Function.
+ __ mov(Operand(esp, 1 * kSystemPointerSize),
+ Immediate(static_cast<int>(deopt_kind)));
+ __ mov(Operand(esp, 2 * kSystemPointerSize),
+ Immediate(Deoptimizer::kFixedExitSizeMarker)); // Bailout id.
+ __ mov(Operand(esp, 3 * kSystemPointerSize), ecx); // Code address or 0.
+ __ mov(Operand(esp, 4 * kSystemPointerSize), edx); // Fp-to-sp delta.
+ __ Move(Operand(esp, 5 * kSystemPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve deoptimizer object in register eax and get the input
+ // frame descriptor pointer.
+ __ mov(esi, Operand(eax, Deoptimizer::input_offset()));
+
+ // Fill in the input registers.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ pop(Operand(esi, offset));
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Fill in the double input registers.
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize;
+ __ movsd(xmm0, Operand(esp, src_offset));
+ __ movsd(Operand(esi, dst_offset), xmm0);
+ }
+
+ // Clear FPU all exceptions.
+ // TODO(ulan): Find out why the TOP register is not zero here in some cases,
+ // and check that the generated code never deoptimizes with unbalanced stack.
+ __ fnclex();
+
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ __ mov_b(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate), edx),
+ Immediate(0));
+
+ // Remove the return address and the double registers.
+ __ add(esp, Immediate(kDoubleRegsSize + 1 * kSystemPointerSize));
+
+ // Compute a pointer to the unwinding limit in register ecx; that is
+ // the first stack slot not part of the input frame.
+ __ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
+ __ add(ecx, esp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ lea(edx, Operand(esi, FrameDescription::frame_content_offset()));
+ Label pop_loop_header;
+ __ jmp(&pop_loop_header);
+ Label pop_loop;
+ __ bind(&pop_loop);
+ __ pop(Operand(edx, 0));
+ __ add(edx, Immediate(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
+ __ cmp(ecx, esp);
+ __ j(not_equal, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(eax);
+ __ PrepareCallCFunction(1, esi);
+ __ mov(Operand(esp, 0 * kSystemPointerSize), eax);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ pop(eax);
+
+ __ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: eax = current FrameDescription**, edx = one
+ // past the last FrameDescription**.
+ __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
+ __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
+ __ lea(edx, Operand(eax, edx, times_system_pointer_size, 0));
+ __ jmp(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: esi = current FrameDescription*, ecx = loop
+ // index.
+ __ mov(esi, Operand(eax, 0));
+ __ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ sub(ecx, Immediate(sizeof(uint32_t)));
+ __ push(Operand(esi, ecx, times_1, FrameDescription::frame_content_offset()));
+ __ bind(&inner_loop_header);
+ __ test(ecx, ecx);
+ __ j(not_zero, &inner_push_loop);
+ __ add(eax, Immediate(kSystemPointerSize));
+ __ bind(&outer_loop_header);
+ __ cmp(eax, edx);
+ __ j(below, &outer_push_loop);
+
+ // In case of a failed STUB, we have to restore the XMM registers.
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ movsd(xmm_reg, Operand(esi, src_offset));
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ push(Operand(esi, FrameDescription::pc_offset()));
+ __ push(Operand(esi, FrameDescription::continuation_offset()));
+
+ // Push the registers from the last output frame.
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ push(Operand(esi, offset));
+ }
+
+ __ mov_b(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate), edx),
+ Immediate(1));
+
+ // Restore the registers from the stack.
+ __ popad();
+
+ __ InitializeRootRegister();
+
+ // Return to the continuation point.
+ __ ret(0);
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
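The four Generate_DeoptimizationEntry_* builtins above differ only in the DeoptimizeKind they pass to the shared helper. For orientation, the saved-register-area arithmetic at the top of that helper works out roughly as follows on ia32; the literal values below are illustrative assumptions, while the formulas mirror the code.

// Illustrative ia32 numbers (sketch; the real values come from the register
// configuration and V8's globals, not these literals).
constexpr int kSystemPointerSize = 4;     // ia32 pointer size
constexpr int kDoubleSize = 8;
constexpr int kNumberOfGpRegisters = 8;   // saved by pushad()
constexpr int kNumberOfXmmRegisters = 8;  // xmm0..xmm7 spill slots reserved above

constexpr int kDoubleRegsSize = kDoubleSize * kNumberOfXmmRegisters;  // 64
constexpr int kSavedRegistersAreaSize =
    kNumberOfGpRegisters * kSystemPointerSize + kDoubleRegsSize;      // 96

// With these sizes, esp + kSavedRegistersAreaSize is the slot holding the
// return address into the deoptimized code (loaded into ecx above), and
// esp + kSavedRegistersAreaSize + kSystemPointerSize is the caller sp from
// which the fp-to-sp delta passed to the Deoptimizer is computed.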
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/ic-dynamic-map-checks.tq b/deps/v8/src/builtins/ic-dynamic-map-checks.tq
new file mode 100644
index 0000000000..745ab711c1
--- /dev/null
+++ b/deps/v8/src/builtins/ic-dynamic-map-checks.tq
@@ -0,0 +1,155 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace ic {
+
+const kSuccess: constexpr int32
+ generates 'static_cast<int>(DynamicMapChecksStatus::kSuccess)';
+const kBailout: constexpr int32
+ generates 'static_cast<int>(DynamicMapChecksStatus::kBailout)';
+const kDeopt: constexpr int32
+ generates 'static_cast<int>(DynamicMapChecksStatus::kDeopt)';
+extern runtime TryMigrateInstance(implicit context: Context)(Object): Object;
+extern macro LoadFeedbackVectorForStub(): FeedbackVector;
+
+macro PerformMapAndHandlerCheck(
+ entry: constexpr int32, polymorphicArray: WeakFixedArray,
+ weakActualMap: WeakHeapObject,
+ actualHandler: Smi|DataHandler): void labels Next,
+ Deopt {
+ const mapIndex = FeedbackIteratorMapIndexForEntry(entry);
+ assert(mapIndex < polymorphicArray.length_intptr);
+
+ const maybeCachedMap = UnsafeCast<WeakHeapObject>(polymorphicArray[mapIndex]);
+ if (maybeCachedMap != weakActualMap) {
+ goto Next;
+ }
+
+ const handlerIndex = FeedbackIteratorHandlerIndexForEntry(entry);
+ assert(handlerIndex < polymorphicArray.length_intptr);
+ const maybeHandler =
+ Cast<Object>(polymorphicArray[handlerIndex]) otherwise unreachable;
+ if (TaggedNotEqual(maybeHandler, actualHandler)) {
+ goto Deopt;
+ }
+}
+
+macro PerformPolymorphicCheck(
+ expectedPolymorphicArray: HeapObject, actualMap: Map,
+ actualHandler: Smi|DataHandler): int32 {
+ if (!Is<WeakFixedArray>(expectedPolymorphicArray)) {
+ return kDeopt;
+ }
+ try {
+ const polymorphicArray =
+ UnsafeCast<WeakFixedArray>(expectedPolymorphicArray);
+ const weakActualMap = MakeWeak(actualMap);
+ const length = polymorphicArray.length_intptr;
+ assert(length > 0);
+
+ try {
+ if (length >= FeedbackIteratorSizeFor(4)) goto Len4;
+ if (length == FeedbackIteratorSizeFor(3)) goto Len3;
+ if (length == FeedbackIteratorSizeFor(2)) goto Len2;
+ if (length == FeedbackIteratorSizeFor(1)) goto Len1;
+
+ unreachable;
+ } label Len4 {
+ PerformMapAndHandlerCheck(
+ 3, polymorphicArray, weakActualMap, actualHandler) otherwise Len3,
+ Deopt;
+ return kSuccess;
+ } label Len3 {
+ PerformMapAndHandlerCheck(
+ 2, polymorphicArray, weakActualMap, actualHandler) otherwise Len2,
+ Deopt;
+ return kSuccess;
+ } label Len2 {
+ PerformMapAndHandlerCheck(
+ 1, polymorphicArray, weakActualMap, actualHandler) otherwise Len1,
+ Deopt;
+ return kSuccess;
+ } label Len1 {
+ PerformMapAndHandlerCheck(
+ 0, polymorphicArray, weakActualMap, actualHandler)
+ otherwise Bailout, Deopt;
+ return kSuccess;
+ }
+ } label Bailout {
+ return kBailout;
+ } label Deopt {
+ return kDeopt;
+ }
+}
+
+macro PerformMonomorphicCheck(
+ feedbackVector: FeedbackVector, slotIndex: intptr, expectedMap: HeapObject,
+ actualMap: Map, actualHandler: Smi|DataHandler): int32 {
+ if (TaggedEqual(expectedMap, actualMap)) {
+ const handlerIndex = slotIndex + 1;
+ assert(handlerIndex < feedbackVector.length_intptr);
+ const maybeHandler =
+ Cast<Object>(feedbackVector[handlerIndex]) otherwise unreachable;
+ if (TaggedEqual(actualHandler, maybeHandler)) {
+ return kSuccess;
+ }
+
+ return kDeopt;
+ }
+
+ return kBailout;
+}
+
+// This builtin performs map checks by dynamically looking at the
+// feedback in the feedback vector.
+//
+// There are two major cases handled by this builtin:
+// (a) Monomorphic check
+// (b) Polymorphic check
+//
+// For the monomorphic check, the incoming map is migrated (if deprecated) and
+// checked against the map and handler in the feedback vector. Otherwise, we
+// bail out to the runtime.
+//
+// For the polymorphic check, the feedback vector is iterated over and
+// each of the maps & handlers is compared against the incoming map and
+// handler.
+//
+// If a map matches and its associated handler check also passes, we return
+// kSuccess.
+//
+// If a map matches but the associated handler check fails, we return kDeopt.
+//
+// For all other cases, we bail out to the runtime.
+builtin DynamicMapChecks(implicit context: Context)(
+ slotIndex: intptr, actualValue: HeapObject,
+ actualHandler: Smi|DataHandler): int32 {
+ const feedbackVector = LoadFeedbackVectorForStub();
+ let actualMap = actualValue.map;
+ const feedback = feedbackVector[slotIndex];
+ try {
+ const maybePolymorphicArray =
+ GetHeapObjectIfStrong(feedback) otherwise MigrateAndDoMonomorphicCheck;
+ return PerformPolymorphicCheck(
+ maybePolymorphicArray, actualMap, actualHandler);
+ } label MigrateAndDoMonomorphicCheck {
+ const expectedMap = GetHeapObjectAssumeWeak(feedback) otherwise Deopt;
+ if (IsDeprecatedMap(actualMap)) {
+ // TODO(gsathya): Should this migration happen before the
+ // polymorphic check?
+ const result = TryMigrateInstance(actualValue);
+ if (TaggedIsSmi(result)) {
+ return kDeopt;
+ }
+ actualMap = actualValue.map;
+ }
+ return PerformMonomorphicCheck(
+ feedbackVector, slotIndex, expectedMap, actualMap, actualHandler);
+ } label Deopt {
+ return kDeopt;
+ }
+}
+
+} // namespace ic
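
Aside: the kSuccess / kBailout / kDeopt protocol above is easier to see outside Torque. The plain C++ below restates the monomorphic decision tree; every type and name here is a hypothetical stand-in for illustration, not V8 code or V8 API.

    // Illustrative only: a plain C++ restatement of the control flow that
    // PerformMonomorphicCheck above encodes. Placeholder types, not V8's.
    enum class CheckStatus { kSuccess, kBailout, kDeopt };

    struct FeedbackSlot {
      const void* expected_map;      // stored as a weak reference in V8
      const void* expected_handler;  // a Smi or DataHandler in V8
    };

    CheckStatus MonomorphicCheck(const FeedbackSlot& slot, const void* actual_map,
                                 const void* actual_handler) {
      if (slot.expected_map != actual_map) {
        return CheckStatus::kBailout;  // unknown map: let the runtime decide
      }
      if (slot.expected_handler != actual_handler) {
        return CheckStatus::kDeopt;    // map matched but the handler changed
      }
      return CheckStatus::kSuccess;    // both map and handler match the feedback
    }

The polymorphic case applies the same per-entry check up to four times over the WeakFixedArray of (map, handler) pairs, falling through from the longest feedback length to the shortest.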
diff --git a/deps/v8/src/builtins/ic.tq b/deps/v8/src/builtins/ic.tq
index f6fecc557f..848d7aad58 100644
--- a/deps/v8/src/builtins/ic.tq
+++ b/deps/v8/src/builtins/ic.tq
@@ -50,10 +50,14 @@ macro IsUninitialized(feedback: MaybeObject): bool {
}
extern macro LoadFeedbackVectorSlot(FeedbackVector, uintptr): MaybeObject;
+extern operator '[]' macro LoadFeedbackVectorSlot(
+ FeedbackVector, intptr): MaybeObject;
extern macro StoreFeedbackVectorSlot(
FeedbackVector, uintptr, MaybeObject): void;
extern macro StoreWeakReferenceInFeedbackVector(
FeedbackVector, uintptr, HeapObject): MaybeObject;
extern macro ReportFeedbackUpdate(FeedbackVector, uintptr, constexpr string);
+extern operator '.length_intptr' macro LoadFeedbackVectorLength(FeedbackVector):
+ intptr;
} // namespace ic
diff --git a/deps/v8/src/builtins/internal.tq b/deps/v8/src/builtins/internal.tq
index c377a2a179..7830cffb30 100644
--- a/deps/v8/src/builtins/internal.tq
+++ b/deps/v8/src/builtins/internal.tq
@@ -47,4 +47,47 @@ builtin BytecodeBudgetInterruptFromCode(implicit context: Context)(
tail runtime::BytecodeBudgetInterruptFromCode(feedbackCell);
}
+extern transitioning macro ForInPrepareForTorque(
+ Map | FixedArray, uintptr, Undefined | FeedbackVector): FixedArray;
+
+transitioning builtin ForInPrepare(implicit _context: Context)(
+ enumerator: Map|FixedArray, slot: uintptr,
+ maybeFeedbackVector: Undefined|FeedbackVector): FixedArray {
+ return ForInPrepareForTorque(enumerator, slot, maybeFeedbackVector);
+}
+
+extern transitioning builtin ForInFilter(implicit context: Context)(
+ JSAny, HeapObject): JSAny;
+extern enum ForInFeedback extends uint31 { kAny, ...}
+extern macro UpdateFeedback(
+ SmiTagged<ForInFeedback>, Undefined | FeedbackVector, uintptr);
+
+@export
+transitioning macro ForInNextSlow(
+ context: Context, slot: uintptr, receiver: JSAnyNotSmi, key: JSAny,
+ cacheType: Object, maybeFeedbackVector: Undefined|FeedbackVector): JSAny {
+ assert(receiver.map != cacheType); // Handled on the fast path.
+ UpdateFeedback(
+ SmiTag<ForInFeedback>(ForInFeedback::kAny), maybeFeedbackVector, slot);
+ return ForInFilter(key, receiver);
+}
+
+// Note: the untagged {slot} parameter must be in the first couple of args to
+// guarantee it's allocated in a register.
+transitioning builtin ForInNext(
+ context: Context, slot: uintptr, receiver: JSAnyNotSmi,
+ cacheArray: FixedArray, cacheType: Object, cacheIndex: Smi,
+ maybeFeedbackVector: Undefined|FeedbackVector): JSAny {
+ // Load the next key from the enumeration array.
+ const key = UnsafeCast<JSAny>(cacheArray.objects[cacheIndex]);
+
+ if (receiver.map == cacheType) {
+ // The enum cache is in use for {receiver}, the {key} is definitely valid.
+ return key;
+ }
+
+ return ForInNextSlow(
+ context, slot, receiver, key, cacheType, maybeFeedbackVector);
+}
+
} // namespace internal
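
Aside: ForInNext above is essentially a one-comparison fast path. The C++ sketch below mirrors that shape under assumed, simplified types (Map, Object, Key and the ForInFilterSlow stub are placeholders, not V8 APIs): return the cached key while the receiver's map still equals the cached enum-cache map, otherwise take the slow path, which in V8 also records generic for-in feedback.

    #include <cstddef>
    #include <vector>

    struct Map {};
    struct Object { const Map* map; };
    using Key = const char*;

    // Stand-in for the slow path: V8 re-checks the key against the receiver
    // and updates feedback; here we simply pass the key through.
    Key ForInFilterSlow(const Object& /*receiver*/, Key key) { return key; }

    Key ForInNextSketch(const Object& receiver,
                        const std::vector<Key>& cache_array,
                        const Map* cache_type, std::size_t cache_index) {
      Key key = cache_array[cache_index];  // next key from the enumeration array
      if (receiver.map == cache_type) {
        return key;  // enum cache still valid for this receiver: key is live
      }
      // Map changed since the cache was built: filter the key on the slow path.
      return ForInFilterSlow(receiver, key);
    }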
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 66700a7119..cba65817a4 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -68,23 +68,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
-
-void LoadStackLimit(MacroAssembler* masm, Register destination,
- StackLimitKind kind) {
- DCHECK(masm->root_array_available());
- Isolate* isolate = masm->isolate();
- ExternalReference limit =
- kind == StackLimitKind::kRealStackLimit
- ? ExternalReference::address_of_real_jslimit(isolate)
- : ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
-
- intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
- __ Lw(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
-}
-
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -103,7 +86,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ SmiTag(a0);
__ Push(cp, a0);
__ SmiUntag(a0);
-#ifdef V8_REVERSE_JSARGS
// Set up pointer to last argument (skip receiver).
__ Addu(
t2, fp,
@@ -112,15 +94,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushArray(t2, a0, t3, t0);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-#else
- // The receiver for the builtin/api call.
- __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- __ PushArray(t2, a0, t3, t0);
-#endif
// Call the function.
// a0: number of arguments (untagged)
@@ -141,22 +114,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Ret();
}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch1, Register scratch2,
- Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
- // Make scratch1 the space we have left. The stack might already be overflowed
- // here which will cause scratch1 to become negative.
- __ subu(scratch1, sp, scratch1);
- // Check if the arguments will overflow the stack.
- __ sll(scratch2, num_args, kPointerSizeLog2);
- // Signed comparison.
- __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
-}
-
} // namespace
// The construct stub for ES5 constructor functions and ES6 class constructors.
@@ -222,7 +179,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore new target.
__ Pop(a3);
-#ifdef V8_REVERSE_JSARGS
// Push the allocated receiver to the stack.
__ Push(v0);
// We need two copies because we may have to return the original one
@@ -235,15 +191,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Addu(
t2, fp,
Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
-#else
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(v0, v0);
-
- // Set up pointer to last argument.
- __ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-#endif
// ----------- S t a t e -------------
// -- a3: new target
@@ -261,7 +208,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ SmiUntag(a0);
Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow);
+ __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
__ Branch(&enough_stack_space);
__ bind(&stack_overflow);
@@ -273,14 +220,17 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ bind(&enough_stack_space);
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments into their
+ // correct positions (including any undefined), instead of delaying this to
+ // InvokeFunction.
+
// Copy arguments and receiver to the expression stack.
__ PushArray(t2, a0, t0, t1);
-#ifdef V8_REVERSE_JSARGS
// We need two copies because we may have to return the original one
// and the calling conventions dictate that the called function pops the
// receiver. The second copy is pushed after the arguments.
__ Push(s0);
-#endif
// Call the function.
__ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
@@ -359,7 +309,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
// Make a2 the space we have left. The stack might already be overflowed
// here which will cause a2 to become negative.
__ Subu(scratch1, sp, scratch1);
@@ -592,7 +542,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Copy arguments to the stack in a loop.
// a0: argc
// s0: argv, i.e. points to first arg
-#ifdef V8_REVERSE_JSARGS
Label loop, entry;
__ Lsa(t2, s0, a0, kPointerSizeLog2);
__ b(&entry);
@@ -608,23 +557,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the receiver.
__ Push(a3);
-#else
- // Push the receiver.
- __ Push(a3);
-
- Label loop, entry;
- __ Lsa(t2, s0, a0, kPointerSizeLog2);
- __ b(&entry);
- __ nop(); // Branch delay slot nop.
- // t2 points past last arg.
- __ bind(&loop);
- __ lw(t0, MemOperand(s0)); // Read next parameter.
- __ addiu(s0, s0, kPointerSize);
- __ lw(t0, MemOperand(t0)); // Dereference handle.
- __ push(t0); // Push parameter.
- __ bind(&entry);
- __ Branch(&loop, ne, s0, Operand(t2));
-#endif
// a0: argc
// a1: function
@@ -722,21 +654,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
-#ifndef V8_REVERSE_JSARGS
- // Push receiver.
- __ lw(t1, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
- __ Push(t1);
-#endif
-
// ----------- S t a t e -------------
// -- a1 : the JSGeneratorObject to resume
// -- t0 : generator function
// -- cp : generator context
// -- ra : return address
- // -- sp[0] : generator receiver
// -----------------------------------
// Copy the function arguments from the generator object's register file.
@@ -747,7 +673,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ lw(t1,
FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
-#ifdef V8_REVERSE_JSARGS
Label done_loop, loop;
__ bind(&loop);
__ Subu(a3, a3, Operand(1));
@@ -760,19 +685,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Push receiver.
__ Lw(kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
__ Push(kScratchReg);
-#else
- Label done_loop, loop;
- __ Move(t2, zero_reg);
- __ bind(&loop);
- __ Subu(a3, a3, Operand(1));
- __ Branch(&done_loop, lt, a3, Operand(zero_reg));
- __ Lsa(kScratchReg, t1, t2, kPointerSizeLog2);
- __ lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
- __ Push(kScratchReg);
- __ Addu(t2, t2, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
-#endif
}
// Underlying function needs to have bytecode available.
@@ -844,29 +756,44 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
OMIT_SMI_CHECK);
}
-static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
- Register args_count = scratch;
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register params_size = scratch1;
- // Get the arguments + receiver count.
- __ lw(args_count,
+ // Get the size of the formal parameters + receiver (in bytes).
+ __ lw(params_size,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ lw(args_count,
- FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+ __ lw(params_size,
+ FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ Register actual_params_size = scratch2;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Lw(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ sll(actual_params_size, actual_params_size, kPointerSizeLog2);
+ __ Addu(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ __ slt(t2, params_size, actual_params_size);
+ __ movn(params_size, actual_params_size, t2);
+#endif
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
- __ Addu(sp, sp, args_count);
+ __ Addu(sp, sp, params_size);
}
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker)));
+ __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
@@ -882,16 +809,21 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
Register closure = a1;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code;
__ Lw(scratch1,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ Lw(scratch1,
FieldMemOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(scratch1, scratch1, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ Branch(&found_deoptimized_code, ne, scratch1, Operand(zero_reg));
+ __ Branch(&heal_optimized_code_slot, ne, scratch1, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
@@ -903,10 +835,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ Addu(a2, optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
__ Jump(a2);
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ // The optimized code slot contains deoptimized code, or the code has been
+ // cleared and the optimization marker was not updated. Evict the code,
+ // update the marker and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
@@ -916,7 +849,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -- optimization_marker : an int32 containing a non-zero optimization
+ // marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
@@ -933,12 +867,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
if (FLAG_debug_code) {
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel,
- optimization_marker,
- Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ __ stop();
}
}
@@ -1066,18 +999,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
__ Branch(&push_stack_frame, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
- // Read off the optimized code slot in the feedback vector, and if there
+ // Read off the optimization state in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
- Register optimized_code_entry = t0;
- __ Lw(optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ Register optimization_state = t0;
+ __ Lw(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
+ // Check if the optimized code slot is not empty or has an optimization marker.
+ Label has_optimized_code_or_marker;
- __ Branch(&optimized_code_slot_not_empty, ne, optimized_code_entry,
- Operand(Smi::FromEnum(OptimizationMarker::kNone)));
+ __ andi(t1, optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
+ __ Branch(&has_optimized_code_or_marker, ne, t1, Operand(zero_reg));
Label not_optimized;
__ bind(&not_optimized);
@@ -1122,7 +1055,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ Subu(t1, sp, Operand(t0));
- LoadStackLimit(masm, a2, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, t1, Operand(a2));
// If ok, push undefined as the initial value for all register file entries.
@@ -1154,7 +1087,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- LoadStackLimit(masm, a2, StackLimitKind::kInterruptStackLimit);
+ __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kInterruptStackLimit);
__ Branch(&stack_check_interrupt, lo, sp, Operand(a2));
__ bind(&after_stack_check_interrupt);
@@ -1196,7 +1129,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&do_return);
// The return value is in v0.
- LeaveInterpreterFrame(masm, t0);
+ LeaveInterpreterFrame(masm, t0, t1);
__ Jump(ra);
__ bind(&stack_check_interrupt);
@@ -1223,19 +1156,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&optimized_code_slot_not_empty);
+ __ bind(&has_optimized_code_or_marker);
+
Label maybe_has_optimized_code;
- // Check if optimized code marker is actually a weak reference to the
- // optimized code as opposed to an optimization marker.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+ // Check if an optimization marker is available.
+ __ andi(t1, optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
+ __ Branch(&maybe_has_optimized_code, eq, t1, Operand(zero_reg));
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = optimization_state;
+ __ Lw(optimization_marker,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+
TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
__ bind(&compile_lazy);
@@ -1259,12 +1199,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ Subu(start_address, start_address, scratch);
// Push the arguments.
-#ifdef V8_REVERSE_JSARGS
__ PushArray(start_address, num_args, scratch, scratch2,
TurboAssembler::PushArrayOrder::kReverse);
-#else
- __ PushArray(start_address, num_args, scratch, scratch2);
-#endif
}
// static
@@ -1280,19 +1216,15 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -- a1 : the target to call (can be any Object).
// -----------------------------------
Label stack_overflow;
-
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ Subu(a0, a0, Operand(1));
}
-#endif
__ Addu(t0, a0, Operand(1)); // Add one for receiver.
- Generate_StackOverflowCheck(masm, t0, t4, t1, &stack_overflow);
+ __ StackOverflowCheck(t0, t4, t1, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Don't copy receiver.
__ mov(t0, a0);
@@ -1311,21 +1243,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// is below that.
__ Lw(a2, MemOperand(a2, -kSystemPointerSize));
}
-#else
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(RootIndex::kUndefinedValue);
- __ mov(t0, a0); // No receiver.
- }
-
- // This function modifies a2, t4 and t1.
- Generate_InterpreterPushArgs(masm, t0, a2, t4, t1);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(a2); // Pass the spread in a register
- __ Subu(a0, a0, Operand(1)); // Subtract one for spread
- }
-#endif
// Call the target.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
@@ -1356,9 +1273,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// -----------------------------------
Label stack_overflow;
__ addiu(t2, a0, 1);
- Generate_StackOverflowCheck(masm, t2, t1, t0, &stack_overflow);
+ __ StackOverflowCheck(t2, t1, t0, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ Subu(a0, a0, Operand(1));
@@ -1378,20 +1294,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(a2, t0);
}
-#else
- // Push a slot for the receiver.
- __ push(zero_reg);
-
- // This function modified t4, t1 and t0.
- Generate_InterpreterPushArgs(masm, a0, t4, t1, t0);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(a2); // Pass the spread in a register
- __ Subu(a0, a0, Operand(1)); // Subtract one for spread
- } else {
- __ AssertUndefinedOrAllocationSite(a2, t0);
- }
-#endif
if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(a1);
@@ -1555,7 +1457,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
Register scratch = temps.Acquire(); // Temp register is not allocatable.
// Register scratch = t3;
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
__ mov(scratch, v0);
} else {
@@ -1566,15 +1467,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
sp, config->num_allocatable_general_registers() * kPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize));
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ sw(v0,
- MemOperand(
- sp, config->num_allocatable_general_registers() * kPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize));
- USE(scratch);
-#endif
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1584,7 +1476,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
}
}
-#ifdef V8_REVERSE_JSARGS
if (with_result && java_script_builtin) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. t0 contains the arguments count, the return value
@@ -1597,7 +1488,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ Subu(a0, a0,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
}
-#endif
__ lw(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1680,9 +1570,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : argArray
+ // -- sp[0] : receiver
// -- sp[4] : thisArg
- // -- sp[8] : receiver
+ // -- sp[8] : argArray
// -----------------------------------
// 1. Load receiver into a1, argArray into a2 (if present), remove all
@@ -1693,7 +1583,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadRoot(a2, RootIndex::kUndefinedValue);
__ mov(a3, a2);
// Lsa() cannot be used here as the scratch value is used later.
-#ifdef V8_REVERSE_JSARGS
__ lw(a1, MemOperand(sp)); // receiver
__ Branch(&no_arg, eq, a0, Operand(zero_reg));
__ lw(a3, MemOperand(sp, kSystemPointerSize)); // thisArg
@@ -1702,22 +1591,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&no_arg);
__ Lsa(sp, sp, a0, kPointerSizeLog2);
__ sw(a3, MemOperand(sp));
-#else
- Register scratch = t0;
- __ sll(scratch, a0, kPointerSizeLog2);
- __ Addu(a0, sp, Operand(scratch));
- __ lw(a1, MemOperand(a0)); // receiver
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ lw(a2, MemOperand(a0)); // thisArg
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ lw(a3, MemOperand(a0)); // argArray
- __ bind(&no_arg);
- __ Addu(sp, sp, Operand(scratch));
- __ sw(a2, MemOperand(sp));
- __ mov(a2, a3);
-#endif
}
// ----------- S t a t e -------------
@@ -1750,7 +1623,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
-#ifdef V8_REVERSE_JSARGS
// 1. Get the callable to call (passed as receiver) from the stack.
__ Pop(a1);
@@ -1766,42 +1638,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Adjust the actual number of arguments.
__ addiu(a0, a0, -1);
-#else
- // 1. Make sure we have at least one argument.
- // a0: actual number of arguments
- {
- Label done;
- __ Branch(&done, ne, a0, Operand(zero_reg));
- __ PushRoot(RootIndex::kUndefinedValue);
- __ Addu(a0, a0, Operand(1));
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack.
- // a0: actual number of arguments
- __ LoadReceiver(a1, a0);
-
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // a0: actual number of arguments
- // a1: function
- {
- Label loop;
- // Calculate the copy start address (destination). Copy end address is sp.
- __ Lsa(a2, sp, a0, kPointerSizeLog2);
-
- __ bind(&loop);
- __ lw(kScratchReg, MemOperand(a2, -kPointerSize));
- __ sw(kScratchReg, MemOperand(a2));
- __ Subu(a2, a2, Operand(kPointerSize));
- __ Branch(&loop, ne, a2, Operand(sp));
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ Subu(a0, a0, Operand(1));
- __ Pop();
- }
-#endif
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -1810,10 +1646,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : argumentsList
- // -- sp[4] : thisArgument
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target (if argc >= 1)
+ // -- sp[8] : thisArgument (if argc >= 2)
+ // -- sp[12] : argumentsList (if argc == 3)
// -----------------------------------
// 1. Load target into a1 (if present), argumentsList into a0 (if present),
@@ -1824,7 +1660,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(a1, RootIndex::kUndefinedValue);
__ mov(a2, a1);
__ mov(a3, a1);
-#ifdef V8_REVERSE_JSARGS
__ Branch(&no_arg, eq, a0, Operand(zero_reg));
__ lw(a1, MemOperand(sp, kSystemPointerSize)); // target
__ Branch(&no_arg, eq, a0, Operand(1));
@@ -1834,25 +1669,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ bind(&no_arg);
__ Lsa(sp, sp, a0, kPointerSizeLog2);
__ sw(a3, MemOperand(sp));
-#else
- Register scratch = t0;
- __ sll(scratch, a0, kPointerSizeLog2);
- __ mov(a0, scratch);
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(zero_reg));
- __ Addu(a0, sp, Operand(a0));
- __ lw(a1, MemOperand(a0)); // target
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ lw(a2, MemOperand(a0)); // thisArgument
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ lw(a3, MemOperand(a0)); // argumentsList
- __ bind(&no_arg);
- __ Addu(sp, sp, Operand(scratch));
- __ sw(a2, MemOperand(sp));
- __ mov(a2, a3);
-#endif
}
// ----------- S t a t e -------------
@@ -1873,12 +1689,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : new.target (optional)
- // -- sp[4] : argumentsList
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target
+ // -- sp[8] : argumentsList
+ // -- sp[12] : new.target (optional)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
// 1. Load target into a1 (if present), argumentsList into a2 (if present),
// new.target into a3 (if present, otherwise use target), remove all
@@ -1888,7 +1703,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Label no_arg;
__ LoadRoot(a1, RootIndex::kUndefinedValue);
__ mov(a2, a1);
-#ifdef V8_REVERSE_JSARGS
__ mov(t0, a1);
__ Branch(&no_arg, eq, a0, Operand(zero_reg));
__ lw(a1, MemOperand(sp, kSystemPointerSize)); // target
@@ -1900,25 +1714,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&no_arg);
__ Lsa(sp, sp, a0, kPointerSizeLog2);
__ sw(t0, MemOperand(sp)); // set undefined to the receiver
-#else
- Register scratch = t0;
- // Lsa() cannot be used hare as scratch value used later.
- __ sll(scratch, a0, kPointerSizeLog2);
- __ Addu(a0, sp, Operand(scratch));
- __ sw(a2, MemOperand(a0)); // receiver
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ lw(a1, MemOperand(a0)); // target
- __ mov(a3, a1); // new.target defaults to target
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ lw(a2, MemOperand(a0)); // argumentsList
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ lw(a3, MemOperand(a0)); // new.target
- __ bind(&no_arg);
- __ Addu(sp, sp, Operand(scratch));
-#endif
}
// ----------- S t a t e -------------
@@ -1991,9 +1786,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Check for stack overflow.
Label stack_overflow;
- Generate_StackOverflowCheck(masm, t0, kScratchReg, t1, &stack_overflow);
+ __ StackOverflowCheck(t0, kScratchReg, t1, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Move the arguments already in the stack,
// including the receiver and the return address.
{
@@ -2014,7 +1808,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Addu(dest, dest, Operand(kSystemPointerSize));
__ Branch(&copy, ge, t1, Operand(zero_reg));
}
-#endif
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -2029,12 +1822,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Branch(&push, ne, t1, Operand(kScratchReg));
__ LoadRoot(kScratchReg, RootIndex::kUndefinedValue);
__ bind(&push);
-#ifdef V8_REVERSE_JSARGS
__ Sw(kScratchReg, MemOperand(t4, 0));
__ Addu(t4, t4, Operand(kSystemPointerSize));
-#else
- __ Push(kScratchReg);
-#endif
__ Branch(&loop);
__ bind(&done);
__ Addu(a0, a0, t2);
@@ -2076,6 +1865,13 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&new_target_constructor);
}
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
+ // code is erased.
+ __ mov(t3, fp);
+ __ Lw(t2, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+#else
+
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ lw(t3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2097,17 +1893,16 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ SmiUntag(t2);
}
__ bind(&arguments_done);
+#endif
Label stack_done, stack_overflow;
__ Subu(t2, t2, a2);
__ Branch(&stack_done, le, t2, Operand(zero_reg));
{
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, t2, t0, t1, &stack_overflow);
+ __ StackOverflowCheck(t2, t0, t1, &stack_overflow);
// Forward the arguments from the caller frame.
-
-#ifdef V8_REVERSE_JSARGS
// Point to the first argument to copy (skipping the receiver).
__ Addu(t3, t3,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
@@ -2134,28 +1929,20 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Addu(dest, dest, Operand(kSystemPointerSize));
__ Branch(&copy, ge, t7, Operand(zero_reg));
}
-#endif
// Copy arguments from the caller frame.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
Label loop;
-#ifndef V8_REVERSE_JSARGS
- __ Addu(t3, t3, Operand(CommonFrameConstants::kFixedFrameSizeAboveFp));
-#endif
__ Addu(a0, a0, t2);
__ bind(&loop);
{
__ Subu(t2, t2, Operand(1));
__ Lsa(kScratchReg, t3, t2, kPointerSizeLog2);
__ lw(kScratchReg, MemOperand(kScratchReg));
-#ifdef V8_REVERSE_JSARGS
__ Lsa(t0, a2, t2, kPointerSizeLog2);
__ Sw(kScratchReg, MemOperand(t0));
-#else
- __ push(kScratchReg);
-#endif
__ Branch(&loop, ne, t2, Operand(zero_reg));
}
}
@@ -2304,7 +2091,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Subu(t1, sp, Operand(t1));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, t1, Operand(kScratchReg));
{
FrameScope scope(masm, StackFrame::MANUAL);
@@ -2314,7 +2102,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Pop receiver.
__ Pop(t1);
@@ -2335,42 +2122,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Push receiver.
__ Push(t1);
-#else
- __ mov(sp, t1);
- // Relocate arguments down the stack.
- {
- Label loop, done_loop;
- __ mov(t1, zero_reg);
- __ bind(&loop);
- __ Branch(&done_loop, gt, t1, Operand(a0));
- __ Lsa(t2, sp, t0, kPointerSizeLog2);
- __ lw(kScratchReg, MemOperand(t2));
- __ Lsa(t2, sp, t1, kPointerSizeLog2);
- __ sw(kScratchReg, MemOperand(t2));
- __ Addu(t0, t0, Operand(1));
- __ Addu(t1, t1, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop, done_loop;
- __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ SmiUntag(t0);
- __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- __ Subu(t0, t0, Operand(1));
- __ Branch(&done_loop, lt, t0, Operand(zero_reg));
- __ Lsa(t1, a2, t0, kPointerSizeLog2);
- __ lw(kScratchReg, MemOperand(t1));
- __ Lsa(t1, sp, a0, kPointerSizeLog2);
- __ sw(kScratchReg, MemOperand(t1));
- __ Addu(a0, a0, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-#endif
// Call the [[BoundTargetFunction]] via the Call builtin.
__ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
@@ -2482,7 +2233,8 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Subu(t1, sp, Operand(t1));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, t1, Operand(kScratchReg));
{
FrameScope scope(masm, StackFrame::MANUAL);
@@ -2492,7 +2244,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Pop receiver
__ Pop(t1);
@@ -2513,42 +2264,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Push receiver.
__ Push(t1);
-#else
- __ mov(sp, t1);
- // Relocate arguments down the stack.
- {
- Label loop, done_loop;
- __ mov(t1, zero_reg);
- __ bind(&loop);
- __ Branch(&done_loop, ge, t1, Operand(a0));
- __ Lsa(t2, sp, t0, kPointerSizeLog2);
- __ lw(kScratchReg, MemOperand(t2));
- __ Lsa(t2, sp, t1, kPointerSizeLog2);
- __ sw(kScratchReg, MemOperand(t2));
- __ Addu(t0, t0, Operand(1));
- __ Addu(t1, t1, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop, done_loop;
- __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ SmiUntag(t0);
- __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- __ Subu(t0, t0, Operand(1));
- __ Branch(&done_loop, lt, t0, Operand(zero_reg));
- __ Lsa(t1, a2, t0, kPointerSizeLog2);
- __ lw(kScratchReg, MemOperand(t1));
- __ Lsa(t1, sp, a0, kPointerSizeLog2);
- __ sw(kScratchReg, MemOperand(t1));
- __ Addu(a0, a0, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-#endif
// Patch new.target to [[BoundTargetFunction]] if new.target equals target.
{
@@ -2639,14 +2354,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a3: new target (passed through to callee)
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, a2, t1, kScratchReg, &stack_overflow);
+ __ StackOverflowCheck(a2, t1, kScratchReg, &stack_overflow);
// Calculate copy start address into a0 and copy end address into t1.
-#ifdef V8_REVERSE_JSARGS
__ Lsa(a0, fp, a2, kPointerSizeLog2);
-#else
- __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
-#endif
// Adjust for return address and receiver.
__ Addu(a0, a0, Operand(2 * kPointerSize));
// Compute copy end address.
@@ -2673,9 +2384,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, a2, t1, kScratchReg, &stack_overflow);
+ __ StackOverflowCheck(a2, t1, kScratchReg, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Fill the remaining expected arguments with undefined.
__ LoadRoot(t0, RootIndex::kUndefinedValue);
__ SmiUntag(t2, a0);
@@ -2705,50 +2415,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(fp));
__ Subu(a0, a0, Operand(kSystemPointerSize));
-#else
- // Calculate copy start address into a0 and copy end address into t3.
- // a0: actual number of arguments as a smi
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
- // Adjust for return address and receiver.
- __ Addu(a0, a0, Operand(2 * kPointerSize));
- // Compute copy end address. Also adjust for return address.
- __ Addu(t3, fp, kPointerSize);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // a0: copy start address
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- // t3: copy end address
- Label copy;
- __ bind(&copy);
- __ lw(t0, MemOperand(a0)); // Adjusted above for return addr and receiver.
- __ Subu(sp, sp, kPointerSize);
- __ Subu(a0, a0, kPointerSize);
- __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t3));
- __ sw(t0, MemOperand(sp)); // In the delay slot.
-
- // Fill the remaining expected arguments with undefined.
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- __ LoadRoot(t0, RootIndex::kUndefinedValue);
- __ sll(t2, a2, kPointerSizeLog2);
- __ Subu(t1, fp, Operand(t2));
- // Adjust for frame.
- __ Subu(t1, t1,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ Subu(sp, sp, kPointerSize);
- __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(t1));
- __ sw(t0, MemOperand(sp));
-#endif
}
// Call the entry point.
@@ -3243,11 +2909,10 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- a2 : arguments count (not including the receiver)
// -- a3 : call data
// -- a0 : holder
- // --
- // -- sp[0] : last argument
+ // -- sp[0] : receiver
+ // -- sp[8] : first argument
// -- ...
- // -- sp[(argc - 1) * 4] : first argument
- // -- sp[(argc + 0) * 4] : receiver
+ // -- sp[(argc) * 8] : last argument
// -----------------------------------
Register api_function_address = a1;
@@ -3322,15 +2987,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ Addu(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
-#else
- __ Addu(scratch, scratch,
- Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
- __ sll(t2, argc, kSystemPointerSizeLog2);
- __ Addu(scratch, scratch, t2);
-#endif
__ sw(scratch, MemOperand(sp, 2 * kPointerSize));
// FunctionCallbackInfo::length_.
@@ -3977,6 +3635,219 @@ void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
}
}
+namespace {
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Unlike on ARM we don't save all the registers, just the useful ones.
+ // For the rest, there are gaps on the stack, so the offsets remain the same.
+ static constexpr int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+ RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+ static constexpr int kDoubleRegsSize =
+ kDoubleSize * DoubleRegister::kNumRegisters;
+
+ // Save all FPU registers before messing with them.
+ __ Subu(sp, sp, Operand(kDoubleRegsSize));
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ Sdc1(fpu_reg, MemOperand(sp, offset));
+ }
+
+ // Push saved_regs (needed to populate FrameDescription::registers_).
+ // Leave gaps for other registers.
+ __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
+ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+ if ((saved_regs & (1 << i)) != 0) {
+ __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ }
+ }
+
+ __ li(a2,
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
+ __ sw(fp, MemOperand(a2));
+
+ static constexpr int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+ __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
+ // Get the address of the location in the code object (a3) (return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register t0.
+ __ mov(a3, ra);
+ __ Addu(t0, sp, Operand(kSavedRegistersAreaSize));
+ __ Subu(t0, fp, t0);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6, t1);
+ // Pass four arguments in a0 to a3, and the fifth & sixth arguments on the stack.
+ __ mov(a0, zero_reg);
+ Label context_check;
+ __ lw(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(a1, &context_check);
+ __ lw(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ li(a1, Operand(static_cast<int>(deopt_kind)));
+ // a2: bailout id already loaded.
+ // a3: code address or 0 already loaded.
+ __ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
+ __ li(t1, ExternalReference::isolate_address(isolate));
+ __ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve "deoptimizer" object in register v0 and get the input
+ // frame descriptor pointer to a1 (deoptimizer->input_);
+ // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
+ __ mov(a0, v0);
+ __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((saved_regs & (1 << i)) != 0) {
+ __ lw(a2, MemOperand(sp, i * kPointerSize));
+ __ sw(a2, MemOperand(a1, offset));
+ } else if (FLAG_debug_code) {
+ __ li(a2, kDebugZapValue);
+ __ sw(a2, MemOperand(a1, offset));
+ }
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ Ldc1(f0, MemOperand(sp, src_offset));
+ __ Sdc1(f0, MemOperand(a1, dst_offset));
+ }
+
+ // Remove the saved registers from the stack.
+ __ Addu(sp, sp, Operand(kSavedRegistersAreaSize));
+
+ // Compute a pointer to the unwinding limit in register a2; that is
+ // the first stack slot not part of the input frame.
+ __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+ __ Addu(a2, a2, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ BranchShort(&pop_loop_header);
+ __ bind(&pop_loop);
+ __ pop(t0);
+ __ sw(t0, MemOperand(a3, 0));
+ __ addiu(a3, a3, sizeof(uint32_t));
+ __ bind(&pop_loop_header);
+ __ BranchShort(&pop_loop, ne, a2, Operand(sp));
+
+ // Compute the output frame in the deoptimizer.
+ __ push(a0); // Preserve deoptimizer object across call.
+ // a0: deoptimizer object; a1: scratch.
+ __ PrepareCallCFunction(1, a1);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
+
+ __ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: t0 = current "FrameDescription** output_",
+ // a1 = one past the last FrameDescription**.
+ __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+ __ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_.
+ __ Lsa(a1, t0, a1, kPointerSizeLog2);
+ __ BranchShort(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+ __ lw(a2, MemOperand(t0, 0)); // output_[ix]
+ __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ BranchShort(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ Subu(a3, a3, Operand(sizeof(uint32_t)));
+ __ Addu(t2, a2, Operand(a3));
+ __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
+ __ push(t3);
+ __ bind(&inner_loop_header);
+ __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+ __ Addu(t0, t0, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
+
+ __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
+ __ push(t2);
+ __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
+ __ push(t2);
+
+ // Technically, restoring 'at' should work unless zero_reg is also restored,
+ // but it's safer to check for this.
+ DCHECK(!(at.bit() & restored_regs));
+ // Restore the registers from the last output frame.
+ __ mov(at, a2);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ lw(ToRegister(i), MemOperand(at, offset));
+ }
+ }
+
+ __ pop(at); // Get continuation, leave pc on stack.
+ __ pop(ra);
+ __ Jump(at);
+ __ stop();
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
#undef __
} // namespace internal
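
Aside: in the reworked LeaveInterpreterFrame, the slt/movn pair is a branchless max of the formal and actual parameter areas, so the larger of the two is dropped from the stack once the arguments adaptor frame is gone. A minimal C++ sketch of that size computation, with assumed names and no real V8 frame constants:

    #include <algorithm>
    #include <cstdint>

    // Illustrative only: the byte count that LeaveInterpreterFrame drops from
    // sp under V8_NO_ARGUMENTS_ADAPTOR. Names are placeholders.
    uint32_t BytesToDrop(uint32_t formal_params_size_bytes,  // includes receiver
                         uint32_t actual_arg_count, uint32_t pointer_size) {
      // Actual arguments + receiver, in bytes (the sll/Addu pair above).
      uint32_t actual_size_bytes = actual_arg_count * pointer_size + pointer_size;
      // The slt/movn pair is a branchless max: free whichever area is larger.
      return std::max(formal_params_size_bytes, actual_size_bytes);
    }

The mips64 file below applies the same change with 64-bit loads and shifts.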
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 04fce6b2a1..1027ec35e5 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -67,24 +67,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
-
-void LoadStackLimit(MacroAssembler* masm, Register destination,
- StackLimitKind kind) {
- DCHECK(masm->root_array_available());
- Isolate* isolate = masm->isolate();
- ExternalReference limit =
- kind == StackLimitKind::kRealStackLimit
- ? ExternalReference::address_of_real_jslimit(isolate)
- : ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
-
- intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
- CHECK(is_int32(offset));
- __ Ld(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
-}
-
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -104,7 +86,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(cp, a0);
__ SmiUntag(a0);
-#ifdef V8_REVERSE_JSARGS
// Set up pointer to last argument (skip receiver).
__ Daddu(
t2, fp,
@@ -113,15 +94,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushArray(t2, a0, t3, t0);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-#else
- // The receiver for the builtin/api call.
- __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ Daddu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- __ PushArray(t2, a0, t3, t0);
-#endif
// Call the function.
// a0: number of arguments (untagged)
@@ -143,22 +115,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Ret();
}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch1, Register scratch2,
- Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
- // Make scratch1 the space we have left. The stack might already be overflowed
- // here which will cause scratch1 to become negative.
- __ dsubu(scratch1, sp, scratch1);
- // Check if the arguments will overflow the stack.
- __ dsll(scratch2, num_args, kPointerSizeLog2);
- // Signed comparison.
- __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
-}
-
} // namespace
// The construct stub for ES5 constructor functions and ES6 class constructors.
@@ -224,7 +180,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore new target.
__ Pop(a3);
-#ifdef V8_REVERSE_JSARGS
// Push the allocated receiver to the stack.
__ Push(v0);
@@ -237,15 +192,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Set up pointer to last argument.
__ Daddu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
-#else
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(v0, v0);
-
- // Set up pointer to last argument.
- __ Daddu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-#endif
// ----------- S t a t e -------------
// -- a3: new target
@@ -263,7 +209,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ SmiUntag(a0);
Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow);
+ __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
__ Branch(&enough_stack_space);
__ bind(&stack_overflow);
@@ -275,14 +221,17 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ bind(&enough_stack_space);
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments into their
+ // correct positions (including any undefined), instead of delaying this to
+ // InvokeFunction.
+
// Copy arguments and receiver to the expression stack.
__ PushArray(t2, a0, t0, t1);
-#ifdef V8_REVERSE_JSARGS
// We need two copies because we may have to return the original one
// and the calling conventions dictate that the called function pops the
// receiver. The second copy is pushed after the arguments.
__ Push(a6);
-#endif
// Call the function.
__ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
@@ -400,21 +349,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
-#ifndef V8_REVERSE_JSARGS
- // Push receiver.
- __ Ld(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
- __ Push(a5);
-#endif
-
// ----------- S t a t e -------------
// -- a1 : the JSGeneratorObject to resume
// -- a4 : generator function
// -- cp : generator context
// -- ra : return address
- // -- sp[0] : generator receiver
// -----------------------------------
// Push holes for arguments to generator function. Since the parser forced
@@ -427,7 +370,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ld(t1,
FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
-#ifdef V8_REVERSE_JSARGS
Label done_loop, loop;
__ bind(&loop);
__ Dsubu(a3, a3, Operand(1));
@@ -440,19 +382,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Push receiver.
__ Ld(kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
__ Push(kScratchReg);
-#else
- Label done_loop, loop;
- __ Move(t2, zero_reg);
- __ bind(&loop);
- __ Dsubu(a3, a3, Operand(1));
- __ Branch(&done_loop, lt, a3, Operand(zero_reg));
- __ Dlsa(kScratchReg, t1, t2, kPointerSizeLog2);
- __ Ld(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
- __ Push(kScratchReg);
- __ Daddu(t2, t2, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
-#endif
}
// Underlying function needs to have bytecode available.
@@ -524,7 +453,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
// Make scratch1 the space we have left. The stack might already be overflowed
// here, which will cause scratch1 to become negative.
__ dsubu(scratch1, sp, scratch1);
@@ -771,7 +700,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Copy arguments to the stack in a loop.
// a4: argc
// a5: argv, i.e. points to first arg
-#ifdef V8_REVERSE_JSARGS
Label loop, entry;
__ Dlsa(s1, a5, a4, kPointerSizeLog2);
__ b(&entry);
@@ -787,24 +715,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the receiver.
__ Push(a3);
-#else
- // Push the receive.
- __ Push(a3);
-
- Label loop, entry;
- __ Dlsa(s1, a5, a4, kPointerSizeLog2);
- __ b(&entry);
- __ nop(); // Branch delay slot nop.
- // s1 points past last arg.
- __ bind(&loop);
- __ Ld(s2, MemOperand(a5)); // Read next parameter.
- __ daddiu(a5, a5, kPointerSize);
- __ Ld(s2, MemOperand(s2)); // Dereference handle.
- __ push(s2); // Push parameter.
- __ bind(&entry);
- __ Branch(&loop, ne, a5, Operand(s1));
-
-#endif
// a0: argc
// a1: function
@@ -863,28 +773,44 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
OMIT_SMI_CHECK);
}
-static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
- Register args_count = scratch;
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register params_size = scratch1;
- // Get the arguments + receiver count.
- __ Ld(args_count,
+ // Get the size of the formal parameters + receiver (in bytes).
+ __ Ld(params_size,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ Lw(t0, FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+ __ Lw(params_size,
+ FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ Register actual_params_size = scratch2;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Ld(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ dsll(actual_params_size, actual_params_size, kPointerSizeLog2);
+ __ Daddu(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ __ slt(t2, params_size, actual_params_size);
+ __ movn(params_size, actual_params_size, t2);
+#endif
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
- __ Daddu(sp, sp, args_count);
+ __ Daddu(sp, sp, params_size);
}
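// A minimal C++ sketch of the size computation above, assuming 8-byte stack
// slots; the function and parameter names are illustrative, not V8 API:
#include <cstddef>

std::size_t InterpreterFrameBytesToDrop(std::size_t formal_params_bytes,
                                        std::size_t actual_argc) {
  // Size of the actual parameters + receiver, in bytes.
  std::size_t actual_params_bytes = (actual_argc + 1) * 8;
  // Free whichever of the two is larger, as the slt/movn pair above does.
  return actual_params_bytes > formal_params_bytes ? actual_params_bytes
                                                   : formal_params_bytes;
}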
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker)));
+ __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
@@ -900,15 +826,20 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
Register closure = a1;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code;
__ Ld(a5,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ Lw(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ Branch(&found_deoptimized_code, ne, a5, Operand(zero_reg));
+ __ Branch(&heal_optimized_code_slot, ne, a5, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
@@ -922,10 +853,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(a2);
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ // The optimized code slot contains deoptimized code, or the code has been
+ // cleared and the optimization marker was not updated. Evict the code,
+ // update the marker and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
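// A rough C++ sketch of the decision made above; the enum and helper are
// illustrative names, not V8 API:
enum class OptimizedCodeSlotAction { kHealSlot, kJumpToOptimizedCode };

OptimizedCodeSlotAction ClassifyOptimizedCodeSlot(
    bool weak_slot_is_cleared, bool code_marked_for_deoptimization) {
  // A cleared weak slot or deoptimized code means the slot and the marker
  // must be fixed up via Runtime::kHealOptimizedCodeSlot.
  if (weak_slot_is_cleared || code_marked_for_deoptimization)
    return OptimizedCodeSlotAction::kHealSlot;
  // Otherwise the code is installed on the closure and tail-called.
  return OptimizedCodeSlotAction::kJumpToOptimizedCode;
}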
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
@@ -935,7 +867,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -- optimization_marker : an int32 containing a non-zero optimization
+ // marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
@@ -952,12 +885,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
if (FLAG_debug_code) {
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel,
- optimization_marker,
- Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ __ stop();
}
}
@@ -1085,18 +1017,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
__ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
- // Read off the optimized code slot in the feedback vector, and if there
+ // Read off the optimization state in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
- Register optimized_code_entry = a4;
- __ Ld(optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ Register optimization_state = a4;
+ __ Lw(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
+ // Check if the optimized code slot is not empty or has an optimization marker.
+ Label has_optimized_code_or_marker;
- __ Branch(&optimized_code_slot_not_empty, ne, optimized_code_entry,
- Operand(Smi::FromEnum(OptimizationMarker::kNone)));
+ __ andi(t0, optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
+ __ Branch(&has_optimized_code_or_marker, ne, t0, Operand(zero_reg));
Label not_optimized;
__ bind(&not_optimized);
@@ -1141,7 +1073,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ Dsubu(a5, sp, Operand(a4));
- LoadStackLimit(masm, a2, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, a5, Operand(a2));
// If ok, push undefined as the initial value for all register file entries.
@@ -1173,7 +1105,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- LoadStackLimit(masm, a5, StackLimitKind::kInterruptStackLimit);
+ __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
__ Branch(&stack_check_interrupt, lo, sp, Operand(a5));
__ bind(&after_stack_check_interrupt);
@@ -1216,7 +1148,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&do_return);
// The return value is in v0.
- LeaveInterpreterFrame(masm, t0);
+ LeaveInterpreterFrame(masm, t0, t1);
__ Jump(ra);
__ bind(&stack_check_interrupt);
@@ -1243,19 +1175,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&optimized_code_slot_not_empty);
+ __ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
- // Check if optimized code marker is actually a weak reference to the
- // optimized code as opposed to an optimization marker.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+ // Check if an optimization marker is available.
+ __ andi(t0, optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
+ __ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg));
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = optimization_state;
+ __ Ld(optimization_marker,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+
TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
__ bind(&compile_lazy);
@@ -1280,12 +1218,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ Dsubu(start_address, start_address, scratch);
// Push the arguments.
-#ifdef V8_REVERSE_JSARGS
- __ PushArray(start_address, num_args, scratch, scratch2,
- TurboAssembler::PushArrayOrder::kReverse);
-#else
- __ PushArray(start_address, num_args, scratch, scratch2);
-#endif
+ __ PushArray(start_address, num_args, scratch, scratch2,
+ TurboAssembler::PushArrayOrder::kReverse);
}
// static
@@ -1301,19 +1235,15 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -- a1 : the target to call (can be any Object).
// -----------------------------------
Label stack_overflow;
-
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ Dsubu(a0, a0, Operand(1));
}
-#endif
__ Daddu(a3, a0, Operand(1)); // Add one for receiver.
- Generate_StackOverflowCheck(masm, a3, a4, t0, &stack_overflow);
+ __ StackOverflowCheck(a3, a4, t0, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Don't copy receiver.
__ mov(a3, a0);
@@ -1332,21 +1262,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// is below that.
__ Ld(a2, MemOperand(a2, -kSystemPointerSize));
}
-#else
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(RootIndex::kUndefinedValue);
- __ mov(a3, a0);
- }
-
- // This function modifies a2, t0 and a4.
- Generate_InterpreterPushArgs(masm, a3, a2, a4, t0);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(a2); // Pass the spread in a register
- __ Dsubu(a0, a0, Operand(1)); // Subtract one for spread
- }
-#endif
// Call the target.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
@@ -1377,9 +1292,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// -----------------------------------
Label stack_overflow;
__ daddiu(a6, a0, 1);
- Generate_StackOverflowCheck(masm, a6, a5, t0, &stack_overflow);
+ __ StackOverflowCheck(a6, a5, t0, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ Dsubu(a0, a0, Operand(1));
@@ -1399,20 +1313,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(a2, t0);
}
-#else
- // Push a slot for the receiver.
- __ push(zero_reg);
-
- // This function modifies t0, a4 and a5.
- Generate_InterpreterPushArgs(masm, a0, a4, a5, t0);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(a2); // Pass the spread in a register
- __ Dsubu(a0, a0, Operand(1)); // Subtract one for spread
- } else {
- __ AssertUndefinedOrAllocationSite(a2, t0);
- }
-#endif
if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(a1);
@@ -1573,7 +1473,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
int allocatable_register_count = config->num_allocatable_general_registers();
Register scratch = t3;
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
__ mov(scratch, v0);
} else {
@@ -1584,15 +1483,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
sp, config->num_allocatable_general_registers() * kPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize));
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ Sd(v0,
- MemOperand(
- sp, config->num_allocatable_general_registers() * kPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize));
- USE(scratch);
-#endif
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1602,7 +1492,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
}
}
-#ifdef V8_REVERSE_JSARGS
if (with_result && java_script_builtin) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. t0 contains the arguments count, the return value
@@ -1615,7 +1504,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ Dsubu(a0, a0,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
}
-#endif
__ Ld(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1697,9 +1585,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : argArray
+ // -- sp[0] : receiver
// -- sp[4] : thisArg
- // -- sp[8] : receiver
+ // -- sp[8] : argArray
// -----------------------------------
Register argc = a0;
@@ -1718,7 +1606,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// Claim (2 - argc) dummy arguments from the stack, to put the stack in a
// consistent state for a simple pop operation.
-#ifdef V8_REVERSE_JSARGS
__ mov(scratch, argc);
__ Ld(this_arg, MemOperand(sp, kPointerSize));
__ Ld(arg_array, MemOperand(sp, 2 * kPointerSize));
@@ -1729,18 +1616,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Ld(receiver, MemOperand(sp));
__ Dlsa(sp, sp, argc, kSystemPointerSizeLog2);
__ Sd(this_arg, MemOperand(sp));
-#else
- __ Dsubu(sp, sp, Operand(2 * kPointerSize));
- __ Dlsa(sp, sp, argc, kPointerSizeLog2);
- __ mov(scratch, argc);
- __ Pop(this_arg, arg_array); // Overwrite argc
- __ Movz(arg_array, undefined_value, scratch); // if argc == 0
- __ Movz(this_arg, undefined_value, scratch); // if argc == 0
- __ Dsubu(scratch, scratch, Operand(1));
- __ Movz(arg_array, undefined_value, scratch); // if argc == 1
- __ Ld(receiver, MemOperand(sp));
- __ Sd(this_arg, MemOperand(sp));
-#endif
}
// ----------- S t a t e -------------
@@ -1775,7 +1650,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
-#ifdef V8_REVERSE_JSARGS
// 1. Get the callable to call (passed as receiver) from the stack.
{
__ Pop(a1);
@@ -1793,42 +1667,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Adjust the actual number of arguments.
__ daddiu(a0, a0, -1);
-#else
- // 1. Make sure we have at least one argument.
- // a0: actual number of arguments
- {
- Label done;
- __ Branch(&done, ne, a0, Operand(zero_reg));
- __ PushRoot(RootIndex::kUndefinedValue);
- __ Daddu(a0, a0, Operand(1));
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack.
- // a0: actual number of arguments
- __ LoadReceiver(a1, a0);
-
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // a0: actual number of arguments
- // a1: function
- {
- Label loop;
- // Calculate the copy start address (destination). Copy end address is sp.
- __ Dlsa(a2, sp, a0, kPointerSizeLog2);
-
- __ bind(&loop);
- __ Ld(kScratchReg, MemOperand(a2, -kPointerSize));
- __ Sd(kScratchReg, MemOperand(a2));
- __ Dsubu(a2, a2, Operand(kPointerSize));
- __ Branch(&loop, ne, a2, Operand(sp));
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ Dsubu(a0, a0, Operand(1));
- __ Pop();
- }
-#endif
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -1837,10 +1675,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : argumentsList (if argc ==3)
- // -- sp[4] : thisArgument (if argc >=2)
- // -- sp[8] : target (if argc >=1)
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[8] : target (if argc >= 1)
+ // -- sp[16] : thisArgument (if argc >= 2)
+ // -- sp[24] : argumentsList (if argc == 3)
// -----------------------------------
Register argc = a0;
@@ -1859,7 +1697,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// Claim (3 - argc) dummy arguments from the stack, to put the stack in a
// consistent state for a simple pop operation.
-#ifdef V8_REVERSE_JSARGS
__ mov(scratch, argc);
__ Ld(target, MemOperand(sp, kPointerSize));
__ Ld(this_argument, MemOperand(sp, 2 * kPointerSize));
@@ -1875,22 +1712,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Dlsa(sp, sp, argc, kSystemPointerSizeLog2);
__ Sd(this_argument, MemOperand(sp, 0)); // Overwrite receiver
-#else
- __ Dsubu(sp, sp, Operand(3 * kPointerSize));
- __ Dlsa(sp, sp, argc, kPointerSizeLog2);
- __ mov(scratch, argc);
- __ Pop(target, this_argument, arguments_list);
- __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
- __ Movz(this_argument, undefined_value, scratch); // if argc == 0
- __ Movz(target, undefined_value, scratch); // if argc == 0
- __ Dsubu(scratch, scratch, Operand(1));
- __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
- __ Movz(this_argument, undefined_value, scratch); // if argc == 1
- __ Dsubu(scratch, scratch, Operand(1));
- __ Movz(arguments_list, undefined_value, scratch); // if argc == 2
-
- __ Sd(this_argument, MemOperand(sp, 0)); // Overwrite receiver
-#endif
}
// ----------- S t a t e -------------
@@ -1912,12 +1733,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : new.target (optional) (dummy value if argc <= 2)
- // -- sp[4] : argumentsList (dummy value if argc <= 1)
- // -- sp[8] : target (dummy value if argc == 0)
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[8] : target
+ // -- sp[16] : argumentsList
+ // -- sp[24] : new.target (optional)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
Register argc = a0;
Register arguments_list = a2;
@@ -1936,7 +1756,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// Claim (3 - argc) dummy arguments from the stack, to put the stack in a
// consistent state for a simple pop operation.
-#ifdef V8_REVERSE_JSARGS
__ mov(scratch, argc);
__ Ld(target, MemOperand(sp, kPointerSize));
__ Ld(arguments_list, MemOperand(sp, 2 * kPointerSize));
@@ -1952,22 +1771,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Dlsa(sp, sp, argc, kSystemPointerSizeLog2);
__ Sd(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
-#else
- __ Dsubu(sp, sp, Operand(3 * kPointerSize));
- __ Dlsa(sp, sp, argc, kPointerSizeLog2);
- __ mov(scratch, argc);
- __ Pop(target, arguments_list, new_target);
- __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
- __ Movz(new_target, undefined_value, scratch); // if argc == 0
- __ Movz(target, undefined_value, scratch); // if argc == 0
- __ Dsubu(scratch, scratch, Operand(1));
- __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
- __ Movz(new_target, target, scratch); // if argc == 1
- __ Dsubu(scratch, scratch, Operand(1));
- __ Movz(new_target, target, scratch); // if argc == 2
-
- __ Sd(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
-#endif
}
// ----------- S t a t e -------------
@@ -2044,9 +1847,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Check for stack overflow.
Label stack_overflow;
- Generate_StackOverflowCheck(masm, len, kScratchReg, a5, &stack_overflow);
+ __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Move the arguments already in the stack,
// including the receiver and the return address.
{
@@ -2067,7 +1869,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Daddu(dest, dest, Operand(kSystemPointerSize));
__ Branch(&copy, ge, t0, Operand(zero_reg));
}
-#endif
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -2087,13 +1888,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Branch(&push, ne, a5, Operand(t1));
__ LoadRoot(a5, RootIndex::kUndefinedValue);
__ bind(&push);
-#ifdef V8_REVERSE_JSARGS
__ Sd(a5, MemOperand(a7, 0));
__ Daddu(a7, a7, Operand(kSystemPointerSize));
__ Daddu(scratch, scratch, Operand(kSystemPointerSize));
-#else
- __ Push(a5);
-#endif
__ Branch(&loop, ne, scratch, Operand(sp));
__ bind(&done);
}
@@ -2134,6 +1931,13 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&new_target_constructor);
}
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
+ // code is erased.
+ __ mov(a6, fp);
+ __ Ld(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+#else
+
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ Ld(a6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2155,17 +1959,17 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
MemOperand(a6, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ bind(&arguments_done);
+#endif
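// With V8_NO_ARGUMENTS_ADAPTOR the caller's own frame already carries the
// argument count, so locating the forwarded arguments reduces to the two
// instructions above; a hedged C++ sketch (struct and helper names are
// illustrative, not V8 API):
#include <cstdint>

struct CallerArgs {
  const std::int64_t* frame;  // a6: frame whose slots hold the arguments
  std::int64_t count;         // a7: number of arguments in that frame
};

CallerArgs LocateCallerArgs(const std::int64_t* fp,
                            std::int64_t argc_slot_index) {
  // argc_slot_index corresponds to StandardFrameConstants::kArgCOffset,
  // expressed in 8-byte slots relative to fp.
  return {fp, fp[argc_slot_index]};
}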
Label stack_done, stack_overflow;
__ Subu(a7, a7, a2);
__ Branch(&stack_done, le, a7, Operand(zero_reg));
{
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, a7, a4, a5, &stack_overflow);
+ __ StackOverflowCheck(a7, a4, a5, &stack_overflow);
// Forward the arguments from the caller frame.
-#ifdef V8_REVERSE_JSARGS
// Point to the first argument to copy (skipping the receiver).
__ Daddu(a6, a6,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
@@ -2192,28 +1996,20 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Daddu(dest, dest, Operand(kSystemPointerSize));
__ Branch(&copy, ge, t2, Operand(zero_reg));
}
-#endif
// Copy arguments from the caller frame.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
Label loop;
-#ifndef V8_REVERSE_JSARGS
- __ Daddu(a6, a6, Operand(CommonFrameConstants::kFixedFrameSizeAboveFp));
-#endif
__ Daddu(a0, a0, a7);
__ bind(&loop);
{
__ Subu(a7, a7, Operand(1));
__ Dlsa(t0, a6, a7, kPointerSizeLog2);
__ Ld(kScratchReg, MemOperand(t0));
-#ifdef V8_REVERSE_JSARGS
__ Dlsa(t0, a2, a7, kPointerSizeLog2);
__ Sd(kScratchReg, MemOperand(t0));
-#else
- __ push(kScratchReg);
-#endif
__ Branch(&loop, ne, a7, Operand(zero_reg));
}
}
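// In C++ terms the loop above is a straightforward backwards copy of the
// forwarded arguments into the freshly reserved stack slots; src and dst
// stand for the load/store bases used above (a6 and a2), and the helper
// name is illustrative:
#include <cstdint>

void CopyForwardedArguments(std::intptr_t* dst, const std::intptr_t* src,
                            std::intptr_t count) {
  for (std::intptr_t i = count - 1; i >= 0; --i) {
    dst[i] = src[i];  // one pointer-sized argument slot per iteration
  }
}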
@@ -2361,7 +2157,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Dsubu(t0, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, t0, Operand(kScratchReg));
{
FrameScope scope(masm, StackFrame::MANUAL);
@@ -2371,7 +2168,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Pop receiver.
__ Pop(t0);
@@ -2393,41 +2189,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Push receiver.
__ Push(t0);
-#else
- __ mov(sp, t0);
- // Relocate arguments down the stack.
- {
- Label loop, done_loop;
- __ mov(a5, zero_reg);
- __ bind(&loop);
- __ Branch(&done_loop, gt, a5, Operand(a0));
- __ Dlsa(a6, sp, a4, kPointerSizeLog2);
- __ Ld(kScratchReg, MemOperand(a6));
- __ Dlsa(a6, sp, a5, kPointerSizeLog2);
- __ Sd(kScratchReg, MemOperand(a6));
- __ Daddu(a4, a4, Operand(1));
- __ Daddu(a5, a5, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop, done_loop;
- __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- __ Dsubu(a4, a4, Operand(1));
- __ Branch(&done_loop, lt, a4, Operand(zero_reg));
- __ Dlsa(a5, a2, a4, kPointerSizeLog2);
- __ Ld(kScratchReg, MemOperand(a5));
- __ Dlsa(a5, sp, a0, kPointerSizeLog2);
- __ Sd(kScratchReg, MemOperand(a5));
- __ Daddu(a0, a0, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-#endif
// Call the [[BoundTargetFunction]] via the Call builtin.
__ Ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
@@ -2536,7 +2297,8 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Dsubu(t0, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, t0, Operand(kScratchReg));
{
FrameScope scope(masm, StackFrame::MANUAL);
@@ -2546,7 +2308,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Pop receiver.
__ Pop(t0);
@@ -2568,41 +2329,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Push receiver.
__ Push(t0);
-#else
- __ mov(sp, t0);
- // Relocate arguments down the stack.
- {
- Label loop, done_loop;
- __ mov(a5, zero_reg);
- __ bind(&loop);
- __ Branch(&done_loop, ge, a5, Operand(a0));
- __ Dlsa(a6, sp, a4, kPointerSizeLog2);
- __ Ld(kScratchReg, MemOperand(a6));
- __ Dlsa(a6, sp, a5, kPointerSizeLog2);
- __ Sd(kScratchReg, MemOperand(a6));
- __ Daddu(a4, a4, Operand(1));
- __ Daddu(a5, a5, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop, done_loop;
- __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- __ Dsubu(a4, a4, Operand(1));
- __ Branch(&done_loop, lt, a4, Operand(zero_reg));
- __ Dlsa(a5, a2, a4, kPointerSizeLog2);
- __ Ld(kScratchReg, MemOperand(a5));
- __ Dlsa(a5, sp, a0, kPointerSizeLog2);
- __ Sd(kScratchReg, MemOperand(a5));
- __ Daddu(a0, a0, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-#endif
// Patch new.target to [[BoundTargetFunction]] if new.target equals target.
{
@@ -2693,16 +2419,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a3: new target (passed through to callee)
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow);
+ __ StackOverflowCheck(a2, a5, kScratchReg, &stack_overflow);
// Calculate copy start address into a0 and copy end address into a4.
-#ifdef V8_REVERSE_JSARGS
__ dsll(a0, a2, kPointerSizeLog2);
__ Daddu(a0, fp, a0);
-#else
- __ SmiScale(a0, a0, kPointerSizeLog2);
- __ Daddu(a0, fp, a0);
-#endif
// Adjust for return address and receiver.
__ Daddu(a0, a0, Operand(2 * kPointerSize));
@@ -2730,9 +2451,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow);
+ __ StackOverflowCheck(a2, a5, kScratchReg, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Fill the remaining expected arguments with undefined.
__ LoadRoot(t0, RootIndex::kUndefinedValue);
__ SmiUntag(t1, a0);
@@ -2763,51 +2483,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(fp));
__ Dsubu(a0, a0, Operand(kSystemPointerSize));
-#else
- // Calculate copy start address into a0 and copy end address into a7.
- // a0: actual number of arguments as a smi
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- __ SmiScale(a0, a0, kPointerSizeLog2);
- __ Daddu(a0, fp, a0);
- // Adjust for return address and receiver.
- __ Daddu(a0, a0, Operand(2 * kPointerSize));
- // Compute copy end address. Also adjust for return address.
- __ Daddu(a7, fp, kPointerSize);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // a0: copy start address
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- // a7: copy end address
- Label copy;
- __ bind(&copy);
- __ Ld(a4, MemOperand(a0)); // Adjusted above for return addr and receiver.
- __ Dsubu(sp, sp, kPointerSize);
- __ Dsubu(a0, a0, kPointerSize);
- __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a7));
- __ Sd(a4, MemOperand(sp)); // In the delay slot.
-
- // Fill the remaining expected arguments with undefined.
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- __ LoadRoot(a5, RootIndex::kUndefinedValue);
- __ dsll(a6, a2, kPointerSizeLog2);
- __ Dsubu(a4, fp, Operand(a6));
- // Adjust for frame.
- __ Dsubu(a4, a4,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ Dsubu(sp, sp, kPointerSize);
- __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a4));
- __ Sd(a5, MemOperand(sp));
-#endif
}
// Call the entry point.
@@ -3304,11 +2979,10 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- a2 : arguments count (not including the receiver)
// -- a3 : call data
// -- a0 : holder
- // --
- // -- sp[0] : last argument
+ // -- sp[0] : receiver
+ // -- sp[8] : first argument
// -- ...
- // -- sp[(argc - 1) * 8] : first argument
- // -- sp[(argc + 0) * 8] : receiver
+ // -- sp[(argc) * 8] : last argument
// -----------------------------------
Register api_function_address = a1;
@@ -3385,15 +3059,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ Daddu(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
-#else
- __ Daddu(scratch, scratch,
- Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
- __ dsll(t2, argc, kSystemPointerSizeLog2);
- __ Daddu(scratch, scratch, t2);
-#endif
__ Sd(scratch, MemOperand(sp, 2 * kPointerSize));
@@ -3533,6 +3200,218 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ Jump(t9);
}
+namespace {
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Unlike on ARM we don't save all the registers, just the useful ones.
+ // For the rest, there are gaps on the stack, so the offsets remain the same.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+ RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+
+ // Save all double FPU registers before messing with them.
+ __ Dsubu(sp, sp, Operand(kDoubleRegsSize));
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ Sdc1(fpu_reg, MemOperand(sp, offset));
+ }
+
+ // Push saved_regs (needed to populate FrameDescription::registers_).
+ // Leave gaps for other registers.
+ __ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
+ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+ if ((saved_regs & (1 << i)) != 0) {
+ __ Sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ }
+ }
+
+ __ li(a2,
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
+ __ Sd(fp, MemOperand(a2));
+
+ const int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+ __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
+ // Get the address of the location in the code object (a3) (return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register a4.
+ __ mov(a3, ra);
+ __ Daddu(a4, sp, Operand(kSavedRegistersAreaSize));
+
+ __ Dsubu(a4, fp, a4);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6, a5);
+ // Pass six arguments, according to n64 ABI.
+ __ mov(a0, zero_reg);
+ Label context_check;
+ __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(a1, &context_check);
+ __ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ li(a1, Operand(static_cast<int>(deopt_kind)));
+ // a2: bailout id already loaded.
+ // a3: code address or 0 already loaded.
+ // a4: already has fp-to-sp delta.
+ __ li(a5, ExternalReference::isolate_address(isolate));
+
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve "deoptimizer" object in register v0 and get the input
+ // frame descriptor pointer to a1 (deoptimizer->input_);
+ // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
+ __ mov(a0, v0);
+ __ Ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((saved_regs & (1 << i)) != 0) {
+ __ Ld(a2, MemOperand(sp, i * kPointerSize));
+ __ Sd(a2, MemOperand(a1, offset));
+ } else if (FLAG_debug_code) {
+ __ li(a2, kDebugZapValue);
+ __ Sd(a2, MemOperand(a1, offset));
+ }
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ Ldc1(f0, MemOperand(sp, src_offset));
+ __ Sdc1(f0, MemOperand(a1, dst_offset));
+ }
+
+ // Remove the saved registers from the stack.
+ __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize));
+
+ // Compute a pointer to the unwinding limit in register a2; that is
+ // the first stack slot not part of the input frame.
+ __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+ __ Daddu(a2, a2, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Daddu(a3, a1, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ BranchShort(&pop_loop_header);
+ __ bind(&pop_loop);
+ __ pop(a4);
+ __ Sd(a4, MemOperand(a3, 0));
+ __ daddiu(a3, a3, sizeof(uint64_t));
+ __ bind(&pop_loop_header);
+ __ BranchShort(&pop_loop, ne, a2, Operand(sp));
+ // Compute the output frame in the deoptimizer.
+ __ push(a0); // Preserve deoptimizer object across call.
+ // a0: deoptimizer object; a1: scratch.
+ __ PrepareCallCFunction(1, a1);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
+
+ __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: a4 = current "FrameDescription** output_",
+ // a1 = one past the last FrameDescription**.
+ __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+ __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
+ __ Dlsa(a1, a4, a1, kPointerSizeLog2);
+ __ BranchShort(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+ __ Ld(a2, MemOperand(a4, 0)); // output_[ix]
+ __ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ BranchShort(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
+ __ Daddu(a6, a2, Operand(a3));
+ __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
+ __ push(a7);
+ __ bind(&inner_loop_header);
+ __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+ __ Daddu(a4, a4, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
+
+ __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
+ __ push(a6);
+ __ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
+ __ push(a6);
+
+ // Technically restoring 'at' should work unless zero_reg is also restored
+ // but it's safer to check for this.
+ DCHECK(!(at.bit() & restored_regs));
+ // Restore the registers from the last output frame.
+ __ mov(at, a2);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ Ld(ToRegister(i), MemOperand(at, offset));
+ }
+ }
+
+ __ pop(at); // Get continuation, leave pc on stack.
+ __ pop(ra);
+ __ Jump(at);
+ __ stop();
+}
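// The fp-to-sp delta handed to Deoptimizer::New above is plain pointer
// arithmetic: the distance between the caller's fp and the sp value before
// the register spill area was pushed. As a sketch (not V8 API):
#include <cstdint>

std::intptr_t FpToSpDelta(std::intptr_t fp, std::intptr_t sp,
                          std::intptr_t saved_registers_area_size) {
  // Mirrors: a4 = sp + kSavedRegistersAreaSize; a4 = fp - a4;
  return fp - (sp + saved_registers_area_size);
}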
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 8f262818ab..efd65e2971 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -123,7 +123,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(cp, r3);
__ SmiUntag(r3, SetRC);
-#ifdef V8_REVERSE_JSARGS
// Set up pointer to last argument (skip receiver).
__ addi(
r7, fp,
@@ -132,15 +131,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushArray(r7, r3, r8, r0);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-#else
- // The receiver for the builtin/api call.
- __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ addi(r7, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- __ PushArray(r7, r3, r8, r0);
-#endif
// Call the function.
// r3: number of arguments (untagged)
@@ -240,7 +230,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore new target.
__ Pop(r6);
-#ifdef V8_REVERSE_JSARGS
// Push the allocated receiver to the stack.
__ Push(r3);
// We need two copies because we may have to return the original one
@@ -254,15 +243,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ addi(
r7, fp,
Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
-#else
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(r3, r3);
-
- // Set up pointer to last argument.
- __ addi(r7, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-#endif
// ----------- S t a t e -------------
// -- r6: new target
@@ -295,10 +275,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Copy arguments and receiver to the expression stack.
__ PushArray(r7, r3, r8, r0);
-#ifdef V8_REVERSE_JSARGS
// Push implicit receiver.
__ Push(r9);
-#endif
// Call the function.
{
@@ -435,19 +413,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ cmpl(sp, scratch);
__ blt(&stack_overflow);
-#ifndef V8_REVERSE_JSARGS
- // Push receiver.
- __ LoadTaggedPointerField(
- scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
- __ Push(scratch);
-#endif
-
// ----------- S t a t e -------------
// -- r4 : the JSGeneratorObject to resume
// -- r7 : generator function
// -- cp : generator context
// -- lr : return address
- // -- sp[0] : generator receiver
// -----------------------------------
// Copy the function arguments from the generator object's register file.
@@ -459,9 +429,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
r5,
FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset));
{
-#ifdef V8_REVERSE_JSARGS
Label done_loop, loop;
-
__ mr(r9, r6);
__ bind(&loop);
@@ -481,24 +449,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ LoadAnyTaggedField(
scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
-#else
- Label loop, done_loop;
- __ cmpi(r6, Operand::Zero());
- __ ble(&done_loop);
-
- // setup r9 to first element address - kTaggedSize
- __ addi(r9, r5,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
-
- __ mtctr(r6);
- __ bind(&loop);
- __ LoadAnyTaggedField(scratch, MemOperand(r9, kTaggedSize));
- __ addi(r9, r9, Operand(kTaggedSize));
- __ push(scratch);
- __ bdnz(&loop);
-
- __ bind(&done_loop);
-#endif
}
// Underlying function needs to have bytecode available.
@@ -514,6 +464,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
+ __ LoadP(r3, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadHalfWord(
+ r3,
+ FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -804,7 +758,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r4: function
// r7: argc
// r8: argv, i.e. points to first arg
-#ifdef V8_REVERSE_JSARGS
Label loop, done;
__ cmpi(r7, Operand::Zero());
__ beq(&done);
@@ -823,24 +776,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the receiver.
__ Push(r6);
-#else
- // Push the receiver.
- __ Push(r6);
-
- Label loop, done;
- __ cmpi(r7, Operand::Zero());
- __ beq(&done);
-
- __ mtctr(r7);
- __ subi(r8, r8, Operand(kSystemPointerSize));
- __ bind(&loop);
- __ LoadPU(r9, MemOperand(r8, kSystemPointerSize)); // read next parameter
- __ LoadP(r0, MemOperand(r9)); // dereference handle
- __ push(r0); // push parameter
- __ bdnz(&loop);
- __ bind(&done);
-#endif
-
// r3: argc
// r4: function
// r6: new.target
@@ -918,13 +853,13 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ add(sp, sp, args_count);
}
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
+ __ cmpi(actual_marker, Operand(expected_marker));
__ bne(&no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -941,10 +876,15 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(r4, r6, optimized_code_entry, scratch));
Register closure = r4;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code;
__ LoadTaggedPointerField(
scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
@@ -952,7 +892,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
- __ bne(&found_deoptimized_code, cr0);
+ __ bne(&heal_optimized_code_slot, cr0);
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
@@ -962,10 +902,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadCodeObjectEntry(r5, optimized_code_entry);
__ Jump(r5);
- // Optimized code slot contains deoptimized code, evict it and re-enter
- // the closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ // The optimized code slot contains deoptimized code, or the code has been
+ // cleared and the optimization marker was not updated. Evict the code,
+ // update the marker and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
@@ -975,7 +916,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -- r6 : new target (preserved for callee if needed, and caller)
// -- r4 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -- optimization_marker : an int32 containing a non-zero optimization
+ // marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, r4, r6, optimization_marker));
@@ -992,13 +934,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
if (FLAG_debug_code) {
- __ CmpSmiLiteral(optimization_marker,
- Smi::FromEnum(OptimizationMarker::kInOptimizationQueue),
- r0);
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
+ __ stop();
}
}
@@ -1135,18 +1075,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
- Register optimized_code_entry = r7;
+ Register optimization_state = r7;
- // Read off the optimized code slot in the feedback vector.
- __ LoadAnyTaggedField(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
- __ CmpSmiLiteral(optimized_code_entry,
- Smi::FromEnum(OptimizationMarker::kNone), r0);
- __ bne(&optimized_code_slot_not_empty);
+ // Read off the optimization state in the feedback vector.
+ __ LoadWord(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset),
+ r0);
+
+ // Check if the optimized code slot is not empty or has an optimization marker.
+ Label has_optimized_code_or_marker;
+ __ TestBitMask(optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
+ r0);
+ __ bne(&has_optimized_code_or_marker, cr0);
Label not_optimized;
__ bind(&not_optimized);
@@ -1233,8 +1174,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- LoadStackLimit(masm, r6, StackLimitKind::kInterruptStackLimit);
- __ cmpl(sp, r6);
+ LoadStackLimit(masm, r0, StackLimitKind::kInterruptStackLimit);
+ __ cmpl(sp, r0);
__ blt(&stack_check_interrupt);
__ bind(&after_stack_check_interrupt);
@@ -1299,25 +1240,33 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
- __ SmiTag(r6, kInterpreterBytecodeOffsetRegister);
- __ StoreP(r6,
+ __ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
+ __ StoreP(r0,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ jmp(&after_stack_check_interrupt);
- __ bind(&optimized_code_slot_not_empty);
+ __ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
- // Check if optimized code marker is actually a weak reference to the
- // optimized code.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+
+ // Check if an optimization marker is available.
+ __ TestBitMask(optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
+ r0);
+ __ beq(&maybe_has_optimized_code, cr0);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = optimization_state;
+ __ LoadAnyTaggedField(
+ optimization_marker,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r9);
__ bind(&compile_lazy);
@@ -1336,12 +1285,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ ShiftLeftImm(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ sub(start_address, start_address, scratch);
// Push the arguments.
-#ifdef V8_REVERSE_JSARGS
__ PushArray(start_address, num_args, scratch, r0,
TurboAssembler::PushArrayOrder::kReverse);
-#else
- __ PushArray(start_address, num_args, scratch, r0);
-#endif
}
// static
@@ -1358,19 +1303,15 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -----------------------------------
Label stack_overflow;
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ subi(r3, r3, Operand(1));
}
-#endif
// Calculate number of arguments (add one for receiver).
__ addi(r6, r3, Operand(1));
-
Generate_StackOverflowCheck(masm, r6, ip, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Don't copy receiver. Argument count is correct.
__ mr(r6, r3);
@@ -1389,21 +1330,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// lies in the next interpreter register.
__ LoadP(r5, MemOperand(r5, -kSystemPointerSize));
}
-#else
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(RootIndex::kUndefinedValue);
- __ mr(r6, r3); // Argument count is correct.
- }
-
- // Push the arguments. r5, r6, r7 will be modified.
- Generate_InterpreterPushArgs(masm, r6, r5, r7);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(r5); // Pass the spread in a register
- __ subi(r3, r3, Operand(1)); // Subtract one for spread
- }
-#endif
// Call the target.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
@@ -1436,7 +1362,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ addi(r8, r3, Operand(1));
Generate_StackOverflowCheck(masm, r8, ip, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ subi(r3, r3, Operand(1));
@@ -1458,22 +1383,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(r5, r8);
}
-#else
-
- // Push a slot for the receiver to be constructed.
- __ li(r0, Operand::Zero());
- __ push(r0);
-
- // Push the arguments (skip if none).
- Generate_InterpreterPushArgs(masm, r3, r7, r8);
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(r5); // Pass the spread in a register
- __ subi(r3, r3, Operand(1)); // Subtract one for spread
- } else {
- __ AssertUndefinedOrAllocationSite(r5, r8);
- }
-
-#endif
if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(r4);
@@ -1642,7 +1551,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
int allocatable_register_count = config->num_allocatable_general_registers();
Register scratch = ip;
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
__ mr(scratch, r3);
} else {
@@ -1654,16 +1562,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kSystemPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize));
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ StoreP(
- r3,
- MemOperand(sp, config->num_allocatable_general_registers() *
- kSystemPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize));
- USE(scratch);
-#endif
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1672,7 +1570,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ SmiUntag(Register::from_code(code));
}
}
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin && with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r0 contains the arguments count, the return value
@@ -1685,7 +1582,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ subi(r3, r3,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
}
-#endif
__ LoadP(
fp,
MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1783,9 +1679,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argc
- // -- sp[0] : argArray
+ // -- sp[0] : receiver
// -- sp[4] : thisArg
- // -- sp[8] : receiver
+ // -- sp[8] : argArray
// -----------------------------------
// 1. Load receiver into r4, argArray into r5 (if present), remove all
@@ -1795,9 +1691,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadRoot(r8, RootIndex::kUndefinedValue);
__ mr(r5, r8);
-#ifdef V8_REVERSE_JSARGS
Label done;
-
__ LoadP(r4, MemOperand(sp)); // receiver
__ cmpi(r3, Operand(1));
__ blt(&done);
@@ -1807,24 +1701,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadP(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
-#else
- Label done;
- __ ShiftLeftImm(r4, r3, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r4, MemOperand(sp, r4)); // receiver
-
- __ li(r0, Operand(1));
- __ sub(r7, r3, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r8, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r8, MemOperand(sp, r8));
-
- __ sub(r7, r7, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r5, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r5, MemOperand(sp, r5));
-
- __ bind(&done);
-#endif
__ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
__ add(sp, sp, ip);
__ StoreP(r8, MemOperand(sp));
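
With the receiver at sp[0], Function.prototype.apply reads thisArg and argArray at fixed one- and two-slot offsets, falling back to undefined whenever argc does not cover them, and only then drops the arguments and stores thisArg into the remaining slot. A compact model of the extraction step; -1 stands in for undefined and element 0 of the vector for sp[0].

#include <cstdio>
#include <vector>

struct ApplyArgs { int receiver, this_arg, arg_array; };

// stack[0] plays the role of sp[0]; -1 stands in for undefined.
ApplyArgs LoadApplyArgs(const std::vector<int>& stack, int argc) {
  constexpr int kUndefined = -1;
  ApplyArgs out{stack[0], kUndefined, kUndefined};
  if (argc >= 1) out.this_arg = stack[1];
  if (argc >= 2) out.arg_array = stack[2];
  return out;
}

int main() {
  ApplyArgs a = LoadApplyArgs({/*receiver*/ 7, /*thisArg*/ 8}, 1);
  std::printf("%d %d %d\n", a.receiver, a.this_arg, a.arg_array);  // 7 8 -1
}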
@@ -1860,7 +1736,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
-#ifdef V8_REVERSE_JSARGS
// 1. Get the callable to call (passed as receiver) from the stack.
__ Pop(r4);
@@ -1877,46 +1752,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Adjust the actual number of arguments.
__ subi(r3, r3, Operand(1));
-#else
- // 1. Make sure we have at least one argument.
- // r3: actual number of arguments
- {
- Label done;
- __ cmpi(r3, Operand::Zero());
- __ bne(&done);
- __ PushRoot(RootIndex::kUndefinedValue);
- __ addi(r3, r3, Operand(1));
- __ bind(&done);
- }
-
- // 2. Get the callable to call (passed as receiver) from the stack.
- // r3: actual number of arguments
- __ LoadReceiver(r4, r3);
-
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // r3: actual number of arguments
- // r4: callable
- {
- Register scratch = r6;
- Label loop;
- // Calculate the copy start address (destination). Copy end address is sp.
- __ ShiftLeftImm(r5, r3, Operand(kSystemPointerSizeLog2));
- __ add(r5, sp, r5);
-
- __ mtctr(r3);
- __ bind(&loop);
- __ LoadP(scratch, MemOperand(r5, -kSystemPointerSize));
- __ StoreP(scratch, MemOperand(r5));
- __ subi(r5, r5, Operand(kSystemPointerSize));
- __ bdnz(&loop);
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ subi(r3, r3, Operand(1));
- __ pop();
- }
-#endif
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -1925,10 +1760,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argc
- // -- sp[0] : argumentsList
- // -- sp[4] : thisArgument
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target (if argc >= 1)
+ // -- sp[8] : thisArgument (if argc >= 2)
+ // -- sp[12] : argumentsList (if argc == 3)
// -----------------------------------
// 1. Load target into r4 (if present), argumentsList into r5 (if present),
@@ -1939,9 +1774,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ mr(r8, r4);
__ mr(r5, r4);
-#ifdef V8_REVERSE_JSARGS
Label done;
-
__ cmpi(r3, Operand(1));
__ blt(&done);
__ LoadP(r4, MemOperand(sp, kSystemPointerSize)); // thisArg
@@ -1953,26 +1786,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadP(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
-#else
- Label done;
- __ li(r0, Operand(1));
- __ sub(r7, r3, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r4, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r4, MemOperand(sp, r4)); // receiver
-
- __ sub(r7, r7, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r8, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r8, MemOperand(sp, r8));
-
- __ sub(r7, r7, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r5, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r5, MemOperand(sp, r5));
-
- __ bind(&done);
-#endif
__ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
__ add(sp, sp, ip);
__ StoreP(r8, MemOperand(sp));
@@ -1996,12 +1809,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argc
- // -- sp[0] : new.target (optional)
- // -- sp[4] : argumentsList
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target
+ // -- sp[8] : argumentsList
+ // -- sp[12] : new.target (optional)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
// 1. Load target into r4 (if present), argumentsList into r5 (if present),
// new.target into r6 (if present, otherwise use target), remove all
@@ -2011,9 +1823,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadRoot(r4, RootIndex::kUndefinedValue);
__ mr(r5, r4);
-#ifdef V8_REVERSE_JSARGS
Label done;
-
__ mr(r7, r4);
__ cmpi(r3, Operand(1));
__ blt(&done);
@@ -2029,31 +1839,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
__ add(sp, sp, r0);
__ StoreP(r7, MemOperand(sp));
-#else
- Label done;
- __ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
- __ StorePX(r5, MemOperand(sp, ip));
- __ li(r0, Operand(1));
- __ sub(r7, r3, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r4, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r4, MemOperand(sp, r4)); // receiver
-
- __ mr(r6, r4);
- __ sub(r7, r7, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r5, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r5, MemOperand(sp, r5));
-
- __ sub(r7, r7, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r6, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r6, MemOperand(sp, r6));
-
- __ bind(&done);
- __ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
- __ add(sp, sp, r0);
-#endif
}
// ----------- S t a t e -------------
@@ -2142,7 +1927,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label stack_overflow;
Generate_StackOverflowCheck(masm, r7, scratch, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Move the arguments already in the stack,
// including the receiver and the return address.
{
@@ -2161,7 +1945,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ StorePU(r0, MemOperand(dest, kSystemPointerSize));
__ bdnz(&copy);
}
-#endif
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -2178,11 +1961,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bne(&skip);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ bind(&skip);
-#ifdef V8_REVERSE_JSARGS
__ StorePU(scratch, MemOperand(r8, kSystemPointerSize));
-#else
- __ push(scratch);
-#endif
__ bdnz(&loop);
__ bind(&no_args);
__ add(r3, r3, r7);
@@ -2270,7 +2049,6 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Generate_StackOverflowCheck(masm, r8, scratch, &stack_overflow);
// Forward the arguments from the caller frame.
-#ifdef V8_REVERSE_JSARGS
// Point to the first argument to copy (skipping the receiver).
__ addi(r7, r7,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
@@ -2296,15 +2074,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ StorePU(r0, MemOperand(dest, kSystemPointerSize));
__ bdnz(&copy);
}
-#endif
// Copy arguments from the caller frame.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
Label loop;
-#ifndef V8_REVERSE_JSARGS
- __ addi(r7, r7, Operand(CommonFrameConstants::kFixedFrameSizeAboveFp));
-#endif
__ add(r3, r3, r8);
__ addi(r5, r5, Operand(kSystemPointerSize));
__ bind(&loop);
@@ -2312,11 +2086,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ subi(r8, r8, Operand(1));
__ ShiftLeftImm(scratch, r8, Operand(kSystemPointerSizeLog2));
__ LoadPX(r0, MemOperand(r7, scratch));
-#ifdef V8_REVERSE_JSARGS
__ StorePX(r0, MemOperand(r5, scratch));
-#else
- __ push(r0);
-#endif
__ cmpi(r8, Operand::Zero());
__ bne(&loop);
}
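
With reversed JS arguments, CallOrConstructVarargs and CallOrConstructForwardVarargs no longer push the extra arguments on top; they slide the slots already on the stack toward the new stack top and write the extra arguments into the gap below them. The rough model below shows only the resulting layout, not the copy loops themselves; element 0 of the vector stands in for sp[0].

#include <cstdio>
#include <vector>

// existing_slots covers the slots the builtin slides down (return address,
// receiver, fixed arguments); extra_args are the spread/forwarded arguments
// written into the gap.
std::vector<int> SpliceInArguments(std::vector<int> stack, size_t existing_slots,
                                   const std::vector<int>& extra_args) {
  stack.insert(stack.begin() + existing_slots, extra_args.begin(),
               extra_args.end());
  return stack;
}

int main() {
  // receiver = 10, fixed arg = 11, caller-frame data = 99; forward 20 and 21.
  std::vector<int> s = SpliceInArguments({10, 11, 99}, 2, {20, 21});
  for (int v : s) std::printf("%d ", v);  // 10 11 20 21 99
  std::printf("\n");
}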
@@ -2480,7 +2250,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Pop receiver.
__ Pop(r8);
@@ -2503,44 +2272,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Push receiver.
__ Push(r8);
-#else
- __ mr(scratch, sp);
- __ mr(sp, r0);
-
- // Relocate arguments down the stack.
- // -- r3 : the number of arguments (not including the receiver)
- // -- r9 : the previous stack pointer
- // -- r10: the size of the [[BoundArguments]]
- {
- Label skip, loop;
- __ li(r8, Operand::Zero());
- __ cmpi(r3, Operand::Zero());
- __ beq(&skip);
- __ mtctr(r3);
- __ bind(&loop);
- __ LoadPX(r0, MemOperand(scratch, r8));
- __ StorePX(r0, MemOperand(sp, r8));
- __ addi(r8, r8, Operand(kSystemPointerSize));
- __ bdnz(&loop);
- __ bind(&skip);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop;
- __ ShiftLeftImm(r10, r7, Operand(kTaggedSizeLog2));
- __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, r10);
- __ mtctr(r7);
- __ bind(&loop);
- __ LoadAnyTaggedField(ip, MemOperand(r5, -kTaggedSize), r0);
- __ StorePX(ip, MemOperand(sp, r8));
- __ addi(r8, r8, Operand(kSystemPointerSize));
- __ addi(r5, r5, Operand(-kTaggedSize));
- __ bdnz(&loop);
- __ add(r3, r3, r7);
- }
-#endif
}
__ bind(&no_bound_arguments);
}
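
Generate_PushBoundArguments reaches the same end state on both ports: the receiver is popped, the [[BoundArguments]] are inserted directly below it so they precede the explicit call arguments, and the receiver is pushed back on top. A small model of that end state, again with index 0 standing in for sp[0]:

#include <cstdio>
#include <vector>

std::vector<int> PushBoundArguments(std::vector<int> stack,
                                    const std::vector<int>& bound_args) {
  int receiver = stack.front();
  stack.erase(stack.begin());                       // Pop receiver.
  stack.insert(stack.begin(), bound_args.begin(),   // Bound args go below it,
               bound_args.end());                   // ahead of the call args.
  stack.insert(stack.begin(), receiver);            // Push receiver back.
  return stack;
}

int main() {
  std::vector<int> s = PushBoundArguments({/*receiver*/ 1, /*call arg*/ 2}, {8, 9});
  for (int v : s) std::printf("%d ", v);  // 1 8 9 2
  std::printf("\n");
}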
@@ -2736,17 +2467,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r6 : new target (passed through to callee)
// -----------------------------------
- Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
+ Label dont_adapt_arguments, stack_overflow;
__ cmpli(r5, Operand(kDontAdaptArgumentsSentinel));
__ beq(&dont_adapt_arguments);
__ LoadTaggedPointerField(
r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
-#ifndef V8_REVERSE_JSARGS
- __ TestBitMask(r7, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask,
- r0);
- __ bne(&skip_adapt_arguments, cr0);
-#endif
// -------------------------------------------
// Adapt arguments.
@@ -2767,13 +2493,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
-#ifdef V8_REVERSE_JSARGS
__ ShiftLeftImm(r3, r5, Operand(kSystemPointerSizeLog2));
__ add(r3, r3, fp);
-#else
- __ SmiToPtrArrayOffset(r3, r3);
- __ add(r3, r3, fp);
-#endif
// adjust for return address and receiver
__ addi(r3, r3, Operand(2 * kSystemPointerSize));
__ ShiftLeftImm(r7, r5, Operand(kSystemPointerSizeLog2));
@@ -2803,7 +2524,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
EnterArgumentsAdaptorFrame(masm);
Generate_StackOverflowCheck(masm, r5, r8, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Fill the remaining expected arguments with undefined.
// r0: actual number of arguments as a smi
// r1: function
@@ -2848,47 +2568,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ cmp(r3, fp); // Compare before moving to next argument.
__ subi(r3, r3, Operand(kSystemPointerSize));
__ b(ne, &copy);
-#else
- // Calculate copy start address into r0 and copy end address is fp.
- // r3: actual number of arguments as a smi
- // r4: function
- // r5: expected number of arguments
- // r6: new target (passed through to callee)
- __ SmiToPtrArrayOffset(r3, r3);
- __ add(r3, r3, fp);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r3: copy start address
- // r4: function
- // r5: expected number of arguments
- // r6: new target (passed through to callee)
- Label copy;
- __ bind(&copy);
- // Adjust load for return address and receiver.
- __ LoadP(r0, MemOperand(r3, 2 * kSystemPointerSize));
- __ push(r0);
- __ cmp(r3, fp); // Compare before moving to next argument.
- __ subi(r3, r3, Operand(kSystemPointerSize));
- __ bne(&copy);
-
- // Fill the remaining expected arguments with undefined.
- // r4: function
- // r5: expected number of arguments
- // r6: new target (passed through to callee)
- __ LoadRoot(r0, RootIndex::kUndefinedValue);
- __ ShiftLeftImm(r7, r5, Operand(kSystemPointerSizeLog2));
- __ sub(r7, fp, r7);
- // Adjust for frame.
- __ subi(r7, r7,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kSystemPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ push(r0);
- __ cmp(sp, r7);
- __ bne(&fill);
-#endif
}
// Call the entry point.
@@ -2911,42 +2590,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
// -------------------------------------------
- // Skip adapt arguments.
- // -------------------------------------------
- __ bind(&skip_adapt_arguments);
- {
- // The callee cannot observe the actual arguments, so it's safe to just
- // pass the expected arguments by massaging the stack appropriately. See
- // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
- Label under_application, over_application;
- __ cmp(r3, r5);
- __ blt(&under_application);
-
- __ bind(&over_application);
- {
- // Remove superfluous parameters from the stack.
- __ sub(r7, r3, r5);
- __ mr(r3, r5);
- __ ShiftLeftImm(r7, r7, Operand(kSystemPointerSizeLog2));
- __ add(sp, sp, r7);
- __ b(&dont_adapt_arguments);
- }
-
- __ bind(&under_application);
- {
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(r7, RootIndex::kUndefinedValue);
- __ bind(&fill);
- __ addi(r3, r3, Operand(1));
- __ push(r7);
- __ cmp(r3, r5);
- __ blt(&fill);
- __ b(&dont_adapt_arguments);
- }
- }
-
- // -------------------------------------------
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
@@ -3451,12 +3094,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- r5 : arguments count (not including the receiver)
// -- r6 : call data
// -- r3 : holder
- // -- sp[0] : last argument
+ // -- sp[0] : receiver
+ // -- sp[8] : first argument
// -- ...
- // -- sp[(argc - 1)* 4] : first argument
- // -- sp[(argc + 0) * 4] : receiver
+ // -- sp[(argc) * 8] : last argument
// -----------------------------------
- // NOTE: The order of args are reversed if V8_REVERSE_JSARGS
Register api_function_address = r4;
Register argc = r5;
@@ -3531,15 +3173,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ addi(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
-#else
- __ addi(scratch, scratch,
- Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
- __ ShiftLeftImm(ip, argc, Operand(kSystemPointerSizeLog2));
- __ add(scratch, scratch, ip);
-#endif
__ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
kSystemPointerSize));
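
Because the first JS argument now sits right after the receiver, which itself follows the FunctionCallbackInfo implicit arguments, FunctionCallbackInfo::values_ becomes a fixed (FCA::kArgsLength + 1)-slot offset from the implicit-args base, whereas the old layout needed argc to reach the first argument. A sketch of the two offset computations; the kArgsLength value is picked for the example only.

#include <cstdint>
#include <cstdio>

constexpr intptr_t kSystemPointerSize = 8;
constexpr intptr_t kArgsLength = 6;  // stand-in for FCA::kArgsLength

// Reversed args: implicit args, then the receiver, then the first argument,
// so values_ is a constant offset from the implicit-args base.
intptr_t ValuesOffsetReversed() {
  return (kArgsLength + 1) * kSystemPointerSize;
}

// Old layout: the first argument sat argc - 1 slots above the last one, so the
// offset depended on the argument count.
intptr_t ValuesOffsetLegacy(intptr_t argc) {
  return (kArgsLength - 1 + argc) * kSystemPointerSize;
}

int main() {
  std::printf("%lld vs %lld\n", static_cast<long long>(ValuesOffsetReversed()),
              static_cast<long long>(ValuesOffsetLegacy(3)));
}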
@@ -3698,6 +3333,252 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ blr();
}
+namespace {
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Unlike on ARM we don't save all the registers, just the useful ones.
+ // For the rest, there are gaps on the stack, so the offsets remain the same.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+ RegList saved_regs = restored_regs | sp.bit();
+
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+
+ // Save all double registers before messing with them.
+ __ subi(sp, sp, Operand(kDoubleRegsSize));
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister dreg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ stfd(dreg, MemOperand(sp, offset));
+ }
+
+ // Push saved_regs (needed to populate FrameDescription::registers_).
+ // Leave gaps for other registers.
+ __ subi(sp, sp, Operand(kNumberOfRegisters * kSystemPointerSize));
+ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+ if ((saved_regs & (1 << i)) != 0) {
+ __ StoreP(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
+ }
+ }
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ Move(scratch, ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, isolate));
+ __ StoreP(fp, MemOperand(scratch));
+ }
+ const int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
+
+ // The bailout id is passed in r29 by the caller.
+ __ mr(r5, r29);
+
+ __ mov(r5, Operand(Deoptimizer::kFixedExitSizeMarker));
+ // Get the address of the location in the code object (r6) (return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register r7.
+ __ mflr(r6);
+ __ addi(r7, sp, Operand(kSavedRegistersAreaSize));
+ __ sub(r7, fp, r7);
+
+ // Allocate a new deoptimizer object.
+ // Pass six arguments in r3 to r8.
+ __ PrepareCallCFunction(6, r8);
+ __ li(r3, Operand::Zero());
+ Label context_check;
+ __ LoadP(r4, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(r4, &context_check);
+ __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ li(r4, Operand(static_cast<int>(deopt_kind)));
+ // r5: bailout id already loaded.
+ // r6: code address or 0 already loaded.
+ // r7: Fp-to-sp delta.
+ __ Move(r8, ExternalReference::isolate_address(isolate));
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve "deoptimizer" object in register r3 and get the input
+ // frame descriptor pointer to r4 (deoptimizer->input_);
+ __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ LoadP(r5, MemOperand(sp, i * kSystemPointerSize));
+ __ StoreP(r5, MemOperand(r4, offset));
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Copy double registers to
+ // double_registers_[DoubleRegister::kNumRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset =
+ code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
+ __ lfd(d0, MemOperand(sp, src_offset));
+ __ stfd(d0, MemOperand(r4, dst_offset));
+ }
+
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register zero = r7;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ li(zero, Operand(0));
+ __ stb(zero, MemOperand(is_iterable));
+ }
+
+ // Remove the saved registers from the stack.
+ __ addi(sp, sp, Operand(kSavedRegistersAreaSize));
+
+ // Compute a pointer to the unwinding limit in register r5; that is
+ // the first stack slot not part of the input frame.
+ __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
+ __ add(r5, r5, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ addi(r6, r4, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ b(&pop_loop_header);
+ __ bind(&pop_loop);
+ __ pop(r7);
+ __ StoreP(r7, MemOperand(r6, 0));
+ __ addi(r6, r6, Operand(kSystemPointerSize));
+ __ bind(&pop_loop_header);
+ __ cmp(r5, sp);
+ __ bne(&pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(r3); // Preserve deoptimizer object across call.
+ // r3: deoptimizer object; r4: scratch.
+ __ PrepareCallCFunction(1, r4);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ pop(r3); // Restore deoptimizer object (class Deoptimizer).
+
+ __ LoadP(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: r7 = current "FrameDescription** output_",
+ // r4 = one past the last FrameDescription**.
+ __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
+ __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
+ __ ShiftLeftImm(r4, r4, Operand(kSystemPointerSizeLog2));
+ __ add(r4, r7, r4);
+ __ b(&outer_loop_header);
+
+ __ bind(&outer_push_loop);
+ // Inner loop state: r5 = current FrameDescription*, r6 = loop index.
+ __ LoadP(r5, MemOperand(r7, 0)); // output_[ix]
+ __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
+ __ b(&inner_loop_header);
+
+ __ bind(&inner_push_loop);
+ __ addi(r6, r6, Operand(-sizeof(intptr_t)));
+ __ add(r9, r5, r6);
+ __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
+ __ push(r9);
+
+ __ bind(&inner_loop_header);
+ __ cmpi(r6, Operand::Zero());
+ __ bne(&inner_push_loop); // test for gt?
+
+ __ addi(r7, r7, Operand(kSystemPointerSize));
+ __ bind(&outer_loop_header);
+ __ cmp(r7, r4);
+ __ blt(&outer_push_loop);
+
+ __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister dreg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ lfd(dreg, MemOperand(r4, src_offset));
+ }
+
+ // Push pc, and continuation from the last output frame.
+ __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset()));
+ __ push(r9);
+ __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset()));
+ __ push(r9);
+
+ // Restore the registers from the last output frame.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ DCHECK(!(scratch.bit() & restored_regs));
+ __ mr(scratch, r5);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ LoadP(ToRegister(i), MemOperand(scratch, offset));
+ }
+ }
+ }
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register one = r7;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ li(one, Operand(1));
+ __ stb(one, MemOperand(is_iterable));
+ }
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ pop(scratch); // get continuation, leave pc on stack
+ __ pop(r0);
+ __ mtlr(r0);
+ __ Jump(scratch);
+ }
+
+ __ stop();
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
#undef __
} // namespace internal
} // namespace v8
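
The new Generate_DeoptimizationEntry follows the shape of the other ports: it spills the general and double registers into the input FrameDescription, calls Deoptimizer::New and Deoptimizer::ComputeOutputFrames through C functions, then walks output_[0..count) and pushes each frame's contents back onto the stack before jumping to the continuation. The sketch below models only that final rebuild step (the outer_push_loop / inner_push_loop pair); FrameDescription here is a bare stand-in for the real class.

#include <cstdint>
#include <cstdio>
#include <vector>

struct FrameDescription {
  std::vector<uint64_t> contents;  // frame_content region, slot 0 first
};

// stack.back() plays the role of the machine stack top (lowest address).
std::vector<uint64_t> RebuildOutputFrames(
    const std::vector<FrameDescription>& outputs) {
  std::vector<uint64_t> stack;
  for (const FrameDescription& frame : outputs) {       // outer_push_loop
    for (size_t i = frame.contents.size(); i-- > 0;) {  // inner_push_loop
      stack.push_back(frame.contents[i]);  // highest slot first, slot 0 ends on top
    }
  }
  return stack;
}

int main() {
  auto stack = RebuildOutputFrames({{{11, 12}}, {{21}}});
  for (uint64_t v : stack)
    std::printf("%llu ", static_cast<unsigned long long>(v));
  std::printf("\n");  // 12 11 21
}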
diff --git a/deps/v8/src/builtins/regexp.tq b/deps/v8/src/builtins/regexp.tq
index e09ddf3d7c..7c043efa55 100644
--- a/deps/v8/src/builtins/regexp.tq
+++ b/deps/v8/src/builtins/regexp.tq
@@ -184,7 +184,8 @@ extern enum Flag constexpr 'JSRegExp::Flag' {
kMultiline,
kSticky,
kUnicode,
- kDotAll
+ kDotAll,
+ kLinear
}
const kRegExpPrototypeOldFlagGetter: constexpr int31
@@ -244,6 +245,13 @@ transitioning javascript builtin RegExpPrototypeMultilineGetter(
'RegExp.prototype.multiline');
}
+transitioning javascript builtin RegExpPrototypeLinearGetter(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ return FlagGetter(
+ receiver, Flag::kLinear, kRegExpPrototypeOldFlagGetter,
+ 'RegExp.prototype.linear');
+}
+
// ES #sec-get-regexp.prototype.dotAll
transitioning javascript builtin RegExpPrototypeDotAllGetter(
js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
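
regexp.tq gains a kLinear flag value and a RegExpPrototypeLinearGetter built on the existing FlagGetter helper, mirroring accessors such as dotAll. The underlying pattern is a single-bit test against the regexp's flags word; a generic C++ rendering with made-up bit positions rather than JSRegExp's actual encoding:

#include <cstdio>

// Illustrative bit assignments only; JSRegExp's real encoding differs.
enum Flag : unsigned {
  kGlobal = 1u << 0,
  kIgnoreCase = 1u << 1,
  kMultiline = 1u << 2,
  kSticky = 1u << 3,
  kUnicode = 1u << 4,
  kDotAll = 1u << 5,
  kLinear = 1u << 6,  // the newly added flag
};

bool FlagGetter(unsigned flags, Flag flag) { return (flags & flag) != 0; }

int main() {
  unsigned flags = kGlobal | kLinear;
  std::printf("linear: %d\n", FlagGetter(flags, kLinear));  // 1
}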
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 3743df4ddb..8cc3a949c3 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -122,7 +122,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(cp, r2);
__ SmiUntag(r2);
-#ifdef V8_REVERSE_JSARGS
// Set up pointer to last argument (skip receiver).
__ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
@@ -130,15 +129,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushArray(r6, r2, r1, r0);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-#else
- // The receiver for the builtin/api call.
- __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- __ PushArray(r6, r2, r1, r0);
-#endif
// Call the function.
// r2: number of arguments
@@ -236,7 +226,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore new target.
__ Pop(r5);
-#ifdef V8_REVERSE_JSARGS
// Push the allocated receiver to the stack.
__ Push(r2);
// We need two copies because we may have to return the original one
@@ -249,15 +238,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Set up pointer to first argument (skip receiver).
__ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
-#else
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(r2, r2);
-
- // Set up pointer to last argument.
- __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
-#endif
// ----------- S t a t e -------------
// -- r5: new target
@@ -290,10 +270,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Copy arguments and receiver to the expression stack.
__ PushArray(r6, r2, r1, r0);
-#ifdef V8_REVERSE_JSARGS
// Push implicit receiver.
__ Push(r8);
-#endif
// Call the function.
__ InvokeFunctionWithNewTarget(r3, r5, r2, CALL_FUNCTION);
@@ -428,19 +406,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ CmpLogicalP(sp, scratch);
__ blt(&stack_overflow);
-#ifndef V8_REVERSE_JSARGS
- // Push receiver.
- __ LoadTaggedPointerField(
- scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
- __ Push(scratch);
-#endif
-
// ----------- S t a t e -------------
// -- r3 : the JSGeneratorObject to resume
// -- r6 : generator function
// -- cp : generator context
// -- lr : return address
- // -- sp[0] : generator receiver
// -----------------------------------
// Copy the function arguments from the generator object's register file.
@@ -452,7 +422,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
r4,
FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
{
-#ifdef V8_REVERSE_JSARGS
Label done_loop, loop;
__ LoadRR(r8, r5);
@@ -472,34 +441,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ LoadAnyTaggedField(
scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
-#else
- Label loop, done_loop;
- __ ShiftLeftP(r1, r5, Operand(kSystemPointerSizeLog2));
- __ SubP(sp, r1);
-
- __ ShiftLeftP(r5, r5, Operand(kTaggedSizeLog2));
-
- // ip = stack offset
- // r5 = parameter array offset
- __ LoadImmP(ip, Operand::Zero());
- __ SubP(r5, Operand(kTaggedSize));
- __ blt(&done_loop);
-
- __ lghi(r1, Operand(-kTaggedSize));
-
- __ bind(&loop);
-
- // parameter copy loop
- __ LoadAnyTaggedField(r0, FieldMemOperand(r4, r5, FixedArray::kHeaderSize));
- __ StoreP(r0, MemOperand(sp, ip));
-
- // update offsets
- __ lay(ip, MemOperand(ip, kSystemPointerSize));
-
- __ BranchRelativeOnIdxHighP(r5, r1, &loop);
-
- __ bind(&done_loop);
-#endif
}
// Underlying function needs to have bytecode available.
@@ -515,6 +456,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
+ __ LoadP(r2, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadHalfWordP(
+ r2,
+ FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -867,8 +812,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r7: scratch reg to hold scaled argc
// r8: scratch reg to hold arg handle
// r9: scratch reg to hold index into argv
-
-#ifdef V8_REVERSE_JSARGS
Label argLoop, argExit;
__ ShiftLeftP(r9, r2, Operand(kSystemPointerSizeLog2));
@@ -891,28 +834,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the receiver.
__ Push(r5);
-#else
- // Push the receiver.
- __ Push(r5);
-
- Label argLoop, argExit;
-
- __ LoadRR(r9, r6);
- __ ltgr(r7, r2);
- __ beq(&argExit, Label::kNear);
- __ bind(&argLoop);
-
- __ LoadP(r8, MemOperand(r9)); // read next parameter
- __ LoadP(r0, MemOperand(r8)); // dereference handle
- __ Push(r0);
- __ la(r9, MemOperand(r9, kSystemPointerSize)); // r9++;
- // __ lay(r7, MemOperand(r7, -kSystemPointerSize));
- __ SubP(r7, r7, Operand(1));
- __ bgt(&argLoop);
-
- __ bind(&argExit);
-#endif
-
// Setup new.target, argc and function.
__ LoadRR(r5, r3);
__ LoadRR(r3, r4);
@@ -990,13 +911,13 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ AddP(sp, sp, args_count);
}
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
+ __ CmpP(actual_marker, Operand(expected_marker));
__ bne(&no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -1013,17 +934,22 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(r3, r5, optimized_code_entry, scratch));
Register closure = r3;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code;
__ LoadTaggedPointerField(
scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ LoadW(scratch, FieldMemOperand(
scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
- __ bne(&found_deoptimized_code);
+ __ bne(&heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
@@ -1033,10 +959,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadCodeObjectEntry(r4, optimized_code_entry);
__ Jump(r4);
- // Optimized code slot contains deoptimized code, evict it and re-enter
- // the closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ // The optimized code slot contains deoptimized code, or the code is cleared
+ // and the optimization marker hasn't been updated. Evict the code, update
+ // the marker and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
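
TailCallOptimizedCodeSlot now folds its two failure modes, a cleared weak reference and code marked for deoptimization, into a single heal_optimized_code_slot exit that calls Runtime::kHealOptimizedCodeSlot rather than the old kEvictOptimizedCodeSlot. A minimal sketch of that decision, with stand-in types rather than the real V8 classes:

#include <cstdio>
#include <optional>

struct CodeStub { bool marked_for_deoptimization; };

enum class Action { kTailCallOptimizedCode, kHealOptimizedCodeSlot };

// A cleared weak reference is modelled as an empty optional.
Action DispatchOptimizedCode(const std::optional<CodeStub>& weak_code) {
  if (!weak_code.has_value()) {
    // LoadWeakValue branched to heal_optimized_code_slot.
    return Action::kHealOptimizedCodeSlot;
  }
  if (weak_code->marked_for_deoptimization) {
    // Code::kMarkedForDeoptimizationBit was set in the code data container.
    return Action::kHealOptimizedCodeSlot;
  }
  // ReplaceClosureCodeWithOptimizedCode, then jump to the code entry.
  return Action::kTailCallOptimizedCode;
}

int main() {
  std::printf("%d\n", static_cast<int>(DispatchOptimizedCode(std::nullopt)));  // 1
}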
@@ -1046,7 +973,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -- r5 : new target (preserved for callee if needed, and caller)
// -- r3 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -- optimization_marker : an int32 containing a non-zero optimization
+ // marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, r3, r5, optimization_marker));
@@ -1063,13 +991,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
if (FLAG_debug_code) {
- __ CmpSmiLiteral(optimization_marker,
- Smi::FromEnum(OptimizationMarker::kInOptimizationQueue),
- r0);
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
+ __ stop();
}
}
@@ -1207,19 +1133,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpP(r6, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
- Register optimized_code_entry = r6;
+ Register optimization_state = r6;
- // Read off the optimized code slot in the feedback vector.
- __ LoadAnyTaggedField(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ // Read off the optimization state in the feedback vector.
+ __ LoadW(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
- __ CmpSmiLiteral(optimized_code_entry,
- Smi::FromEnum(OptimizationMarker::kNone), r0);
- __ bne(&optimized_code_slot_not_empty);
+ // Check if the optimized code slot is not empty or has an optimization marker.
+ Label has_optimized_code_or_marker;
+ __ TestBitMask(optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
+ r0);
+ __ bne(&has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
@@ -1287,7 +1212,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
}
// If the bytecode array has a valid incoming new target or generator object
- // register, initialize it with incoming value which was passed in r6.
+ // register, initialize it with the incoming value which was passed in r5.
Label no_incoming_new_target_or_generator_register;
__ LoadW(r8, FieldMemOperand(
kInterpreterBytecodeArrayRegister,
@@ -1301,9 +1226,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- __ LoadP(r5,
+ __ LoadP(r0,
StackLimitAsMemOperand(masm, StackLimitKind::kInterruptStackLimit));
- __ CmpLogicalP(sp, r5);
+ __ CmpLogicalP(sp, r0);
__ blt(&stack_check_interrupt);
__ bind(&after_stack_check_interrupt);
@@ -1350,15 +1275,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r4);
__ Ret();
- __ bind(&optimized_code_slot_not_empty);
- Label maybe_has_optimized_code;
- // Check if optimized code marker is actually a weak reference to the
- // optimized code.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
- // Fall through if there's no runnable optimized code.
- __ jmp(&not_optimized);
-
__ bind(&stack_check_interrupt);
// Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
// for the call to the StackGuard.
@@ -1378,16 +1294,33 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
- __ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
- __ StoreP(r5,
+ __ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
+ __ StoreP(r0,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ jmp(&after_stack_check_interrupt);
+ __ bind(&has_optimized_code_or_marker);
+ Label maybe_has_optimized_code;
+
+ // Check if optimized code is available
+ __ TestBitMask(optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
+ r0);
+ __ beq(&maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+ // Fall through if there's no runnable optimized code.
+ __ jmp(&not_optimized);
+
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = optimization_state;
+ __ LoadAnyTaggedField(
+ optimization_marker,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8);
__ bind(&compile_lazy);
@@ -1406,12 +1339,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ ShiftLeftP(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ SubP(start_address, start_address, scratch);
// Push the arguments.
-#ifdef V8_REVERSE_JSARGS
__ PushArray(start_address, num_args, r1, scratch,
TurboAssembler::PushArrayOrder::kReverse);
-#else
- __ PushArray(start_address, num_args, r1, scratch);
-#endif
}
// static
@@ -1427,19 +1356,15 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -- r3 : the target to call (can be any Object).
// -----------------------------------
Label stack_overflow;
-
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ SubP(r2, r2, Operand(1));
}
-#endif
// Calculate number of arguments (AddP one for receiver).
__ AddP(r5, r2, Operand(1));
Generate_StackOverflowCheck(masm, r5, ip, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Don't copy receiver. Argument count is correct.
__ LoadRR(r5, r2);
@@ -1458,20 +1383,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// lies in the next interpreter register.
__ LoadP(r4, MemOperand(r4, -kSystemPointerSize));
}
-#else
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(RootIndex::kUndefinedValue);
- __ LoadRR(r5, r2); // Argument count is correct.
- }
-
- // Push the arguments.
- Generate_InterpreterPushArgs(masm, r5, r4, r6);
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(r4); // Pass the spread in a register
- __ SubP(r2, r2, Operand(1)); // Subtract one for spread
- }
-#endif
// Call the target.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
@@ -1504,7 +1415,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ AddP(r7, r2, Operand(1));
Generate_StackOverflowCheck(masm, r7, ip, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ SubP(r2, r2, Operand(1));
@@ -1526,22 +1436,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(r4, r7);
}
-#else
- // Push a slot for the receiver to be constructed.
- __ LoadImmP(r0, Operand::Zero());
- __ push(r0);
-
- // Push the arguments (skip if none).
- Generate_InterpreterPushArgs(masm, r2, r6, r7);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(r4); // Pass the spread in a register
- __ SubP(r2, r2, Operand(1)); // Subtract one for spread
- } else {
- __ AssertUndefinedOrAllocationSite(r4, r7);
- }
-
-#endif
if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(r3);
@@ -1707,7 +1601,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
int allocatable_register_count = config->num_allocatable_general_registers();
Register scratch = ip;
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
__ LoadRR(scratch, r2);
} else {
@@ -1719,16 +1612,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kSystemPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize));
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ StoreP(
- r2,
- MemOperand(sp, config->num_allocatable_general_registers() *
- kSystemPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize));
- USE(scratch);
-#endif
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1737,7 +1620,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ SmiUntag(Register::from_code(code));
}
}
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin && with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r0 contains the arguments count, the return value
@@ -1750,7 +1632,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ SubP(r2, r2,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
}
-#endif
__ LoadP(
fp,
MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1840,9 +1721,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argc
- // -- sp[0] : argArray
+ // -- sp[0] : receiver
// -- sp[4] : thisArg
- // -- sp[8] : receiver
+ // -- sp[8] : argArray
// -----------------------------------
// 1. Load receiver into r3, argArray into r4 (if present), remove all
@@ -1851,8 +1732,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
{
__ LoadRoot(r7, RootIndex::kUndefinedValue);
__ LoadRR(r4, r7);
-
-#ifdef V8_REVERSE_JSARGS
Label done;
__ LoadP(r3, MemOperand(sp)); // receiver
@@ -1864,23 +1743,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadP(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
-#else
- Label done;
- __ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
- __ LoadP(r3, MemOperand(sp, r1)); // receiver
-
- __ SubP(r6, r2, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r7, MemOperand(sp, r1));
-
- __ SubP(r6, r6, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r4, MemOperand(sp, r1));
-
- __ bind(&done);
-#endif
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ lay(sp, MemOperand(sp, r1));
__ StoreP(r7, MemOperand(sp));
@@ -1916,7 +1778,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
-#ifdef V8_REVERSE_JSARGS
// 1. Get the callable to call (passed as receiver) from the stack.
__ Pop(r3);
@@ -1933,46 +1794,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Adjust the actual number of arguments.
__ SubP(r2, r2, Operand(1));
-#else
- // 1. Make sure we have at least one argument.
- // r2: actual number of arguments
- {
- Label done;
- __ CmpP(r2, Operand::Zero());
- __ bne(&done, Label::kNear);
- __ PushRoot(RootIndex::kUndefinedValue);
- __ AddP(r2, Operand(1));
- __ bind(&done);
- }
-
- // r2: actual number of arguments
- // 2. Get the callable to call (passed as receiver) from the stack.
- __ LoadReceiver(r3, r2);
-
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // r2: actual number of arguments
- // r3: callable
- {
- Register scratch = r5;
- Label loop;
- // Calculate the copy start address (destination). Copy end address is sp.
- __ ShiftLeftP(r4, r2, Operand(kSystemPointerSizeLog2));
- __ lay(r4, MemOperand(sp, r4));
-
- __ bind(&loop);
- __ LoadP(scratch, MemOperand(r4, -kSystemPointerSize));
- __ StoreP(scratch, MemOperand(r4));
- __ SubP(r4, Operand(kSystemPointerSize));
- __ CmpP(r4, sp);
- __ bne(&loop);
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ SubP(r2, Operand(1));
- __ pop();
- }
-#endif
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -1981,10 +1802,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argc
- // -- sp[0] : argumentsList
- // -- sp[4] : thisArgument
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target (if argc >= 1)
+ // -- sp[8] : thisArgument (if argc >= 2)
+ // -- sp[12] : argumentsList (if argc == 3)
// -----------------------------------
// 1. Load target into r3 (if present), argumentsList into r4 (if present),
@@ -1995,7 +1816,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRR(r7, r3);
__ LoadRR(r4, r3);
-#ifdef V8_REVERSE_JSARGS
Label done;
__ cghi(r2, Operand(1));
@@ -2009,25 +1829,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadP(r4, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
-#else
- Label done;
- __ SubP(r6, r2, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r3, MemOperand(sp, r1)); // receiver
-
- __ SubP(r6, r6, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r7, MemOperand(sp, r1));
-
- __ SubP(r6, r6, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r4, MemOperand(sp, r1));
-
- __ bind(&done);
-#endif
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ lay(sp, MemOperand(sp, r1));
__ StoreP(r7, MemOperand(sp));
@@ -2051,12 +1852,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argc
- // -- sp[0] : new.target (optional)
- // -- sp[4] : argumentsList
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target
+ // -- sp[8] : argumentsList
+ // -- sp[12] : new.target (optional)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
// 1. Load target into r3 (if present), argumentsList into r4 (if present),
// new.target into r5 (if present, otherwise use target), remove all
@@ -2066,7 +1866,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadRoot(r3, RootIndex::kUndefinedValue);
__ LoadRR(r4, r3);
-#ifdef V8_REVERSE_JSARGS
Label done;
__ LoadRR(r6, r3);
@@ -2084,30 +1883,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ lay(sp, MemOperand(sp, r1));
__ StoreP(r6, MemOperand(sp));
-#else
- Label done;
- __ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
- __ StoreP(r4, MemOperand(sp, r1));
- __ SubP(r6, r2, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r3, MemOperand(sp, r1)); // receiver
-
- __ LoadRR(r5, r3);
- __ SubP(r6, r6, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r4, MemOperand(sp, r1));
-
- __ SubP(r6, r6, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r5, MemOperand(sp, r1));
-
- __ bind(&done);
- __ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
- __ lay(sp, MemOperand(sp, r1));
-#endif
}
// ----------- S t a t e -------------
@@ -2206,7 +1981,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label stack_overflow;
Generate_StackOverflowCheck(masm, r6, scratch, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Move the arguments already in the stack,
// including the receiver and the return address.
{
@@ -2228,7 +2002,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bind(&check);
__ b(ge, &copy);
}
-#endif
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -2245,12 +2018,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bne(&skip, Label::kNear);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ bind(&skip);
-#ifdef V8_REVERSE_JSARGS
__ StoreP(scratch, MemOperand(r7));
__ lay(r7, MemOperand(r7, kSystemPointerSize));
-#else
- __ Push(scratch);
-#endif
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
__ AddP(r2, r2, r6);
@@ -2338,7 +2107,6 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Generate_StackOverflowCheck(masm, r7, scratch, &stack_overflow);
// Forward the arguments from the caller frame.
-#ifdef V8_REVERSE_JSARGS
__ LoadRR(r5, r5);
// Point to the first argument to copy (skipping the receiver).
__ AddP(r6, r6,
@@ -2369,26 +2137,19 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&check);
__ b(ge, &copy);
}
-#endif
+
// Copy arguments from the caller frame.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
Label loop;
-#ifndef V8_REVERSE_JSARGS
- __ AddP(r6, r6, Operand(CommonFrameConstants::kFixedFrameSizeAboveFp));
-#endif
__ AddP(r2, r2, r7);
__ bind(&loop);
{
__ SubP(r7, r7, Operand(1));
__ ShiftLeftP(r1, r7, Operand(kSystemPointerSizeLog2));
__ LoadP(scratch, MemOperand(r6, r1));
-#ifdef V8_REVERSE_JSARGS
__ StoreP(scratch, MemOperand(r4, r1));
-#else
- __ push(scratch);
-#endif
__ CmpP(r7, Operand::Zero());
__ bne(&loop);
}
@@ -2552,7 +2313,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Pop receiver.
__ Pop(r7);
@@ -2574,42 +2334,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Push receiver.
__ Push(r7);
-#else
- __ LoadRR(scratch, sp);
- __ LoadRR(sp, r1);
-
- // Relocate arguments down the stack.
- // -- r2 : the number of arguments (not including the receiver)
- // -- r8 : the previous stack pointer
- {
- Label skip, loop;
- __ LoadImmP(r7, Operand::Zero());
- __ CmpP(r2, Operand::Zero());
- __ beq(&skip);
- __ LoadRR(r1, r2);
- __ bind(&loop);
- __ LoadP(r0, MemOperand(scratch, r7));
- __ StoreP(r0, MemOperand(sp, r7));
- __ lay(r7, MemOperand(r7, kSystemPointerSize));
- __ BranchOnCount(r1, &loop);
- __ bind(&skip);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop;
- __ ShiftLeftP(r9, r6, Operand(kTaggedSizeLog2));
- __ lay(r4, MemOperand(r4, r9, FixedArray::kHeaderSize - kHeapObjectTag));
- __ LoadRR(r1, r6);
- __ bind(&loop);
- __ LoadAnyTaggedField(ip, MemOperand(r4, -kTaggedSize), r0);
- __ lay(r4, MemOperand(r4, -kTaggedSize));
- __ StoreP(ip, MemOperand(sp, r7));
- __ lay(r7, MemOperand(r7, kSystemPointerSize));
- __ BranchOnCount(r1, &loop);
- __ AddP(r2, r2, r6);
- }
-#endif
}
__ bind(&no_bound_arguments);
}
@@ -2804,18 +2528,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r5 : new target (passed through to callee)
// -----------------------------------
- Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
+ Label dont_adapt_arguments, stack_overflow;
__ tmll(r4, Operand(kDontAdaptArgumentsSentinel));
__ b(Condition(1), &dont_adapt_arguments);
__ LoadTaggedPointerField(
r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
-#ifndef V8_REVERSE_JSARGS
- __ tmlh(r6,
- Operand(SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask >>
- 16));
- __ bne(&skip_adapt_arguments);
-#endif
// -------------------------------------------
// Adapt arguments.
@@ -2836,13 +2554,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
-#ifdef V8_REVERSE_JSARGS
__ ShiftLeftP(r2, r4, Operand(kSystemPointerSizeLog2));
__ AddP(r2, fp);
-#else
- __ SmiToPtrArrayOffset(r2, r2);
- __ AddP(r2, fp);
-#endif
// adjust for return address and receiver
__ AddP(r2, r2, Operand(2 * kSystemPointerSize));
__ ShiftLeftP(r6, r4, Operand(kSystemPointerSizeLog2));
@@ -2872,7 +2585,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
EnterArgumentsAdaptorFrame(masm);
Generate_StackOverflowCheck(masm, r4, r7, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Fill the remaining expected arguments with undefined.
// r0: actual number of arguments as a smi
// r1: function
@@ -2917,46 +2629,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ CmpP(r2, fp); // Compare before moving to next argument.
__ lay(r2, MemOperand(r2, -kSystemPointerSize));
__ b(ne, &copy);
-#else
- // Calculate copy start address into r0 and copy end address is fp.
- // r2: actual number of arguments as a smi
- // r3: function
- // r4: expected number of arguments
- // r5: new target (passed through to callee)
- __ SmiToPtrArrayOffset(r2, r2);
- __ lay(r2, MemOperand(r2, fp));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r2: copy start address
- // r3: function
- // r4: expected number of arguments
- // r5: new target (passed through to callee)
- Label copy;
- __ bind(&copy);
- // Adjust load for return address and receiver.
- __ LoadP(r0, MemOperand(r2, 2 * kSystemPointerSize));
- __ push(r0);
- __ CmpP(r2, fp); // Compare before moving to next argument.
- __ lay(r2, MemOperand(r2, -kSystemPointerSize));
- __ bne(&copy);
-
- // Fill the remaining expected arguments with undefined.
- // r3: function
- // r4: expected number of argumentus
- __ LoadRoot(r0, RootIndex::kUndefinedValue);
- __ ShiftLeftP(r6, r4, Operand(kSystemPointerSizeLog2));
- __ SubP(r6, fp, r6);
- // Adjust for frame.
- __ SubP(r6, r6,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kSystemPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ push(r0);
- __ CmpP(sp, r6);
- __ bne(&fill);
-#endif
}
// Call the entry point.
@@ -2979,42 +2651,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
// -------------------------------------------
- // Skip adapt arguments.
- // -------------------------------------------
- __ bind(&skip_adapt_arguments);
- {
- // The callee cannot observe the actual arguments, so it's safe to just
- // pass the expected arguments by massaging the stack appropriately. See
- // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
- Label under_application, over_application;
- __ CmpP(r2, r4);
- __ blt(&under_application);
-
- __ bind(&over_application);
- {
- // Remove superfluous parameters from the stack.
- __ SubP(r6, r2, r4);
- __ lgr(r2, r4);
- __ ShiftLeftP(r6, r6, Operand(kSystemPointerSizeLog2));
- __ lay(sp, MemOperand(sp, r6));
- __ b(&dont_adapt_arguments);
- }
-
- __ bind(&under_application);
- {
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(r6, RootIndex::kUndefinedValue);
- __ bind(&fill);
- __ AddP(r2, r2, Operand(1));
- __ push(r6);
- __ CmpP(r2, r4);
- __ blt(&fill);
- __ b(&dont_adapt_arguments);
- }
- }
-
- // -------------------------------------------
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
@@ -3507,12 +3143,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- r4 : arguments count (not including the receiver)
// -- r5 : call data
// -- r2 : holder
- // -- sp[0] : last argument
+ // -- sp[0] : receiver
+ // -- sp[8] : first argument
// -- ...
- // -- sp[(argc - 1) * 4] : first argument
- // -- sp[(argc + 0) * 4] : receiver
+ // -- sp[(argc) * 8] : last argument
// -----------------------------------
- // NOTE: The order of args are reversed if V8_REVERSE_JSARGS
Register api_function_address = r3;
Register argc = r4;
@@ -3587,15 +3222,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ AddP(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
-#else
- __ AddP(scratch, scratch,
- Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
- __ ShiftLeftP(r1, argc, Operand(kSystemPointerSizeLog2));
- __ AddP(scratch, scratch, r1);
-#endif
__ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
kSystemPointerSize));
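With the arguments now stored in memory order (receiver first, then the arguments), FunctionCallbackInfo::values_ becomes a fixed offset from the implicit-args block rather than an offset scaled by argc as in the removed branch. A toy C++ illustration of that pointer arithmetic; the kArgsLength value and the flat array below are invented for the example and are not the real FunctionCallbackArguments layout:

#include <cassert>
#include <cstdint>

int main() {
  // Pretend frame: kArgsLength implicit slots, then the receiver, then argc
  // arguments laid out in memory order.
  constexpr int kArgsLength = 6;  // Illustrative value, not the real constant.
  constexpr int kArgc = 3;
  intptr_t frame[kArgsLength + 1 + kArgc] = {};
  for (int i = 0; i < kArgc; ++i) frame[kArgsLength + 1 + i] = 100 + i;

  intptr_t* implicit_args = frame;
  // Removed scheme (arguments in reverse order):
  //   values_ = implicit_args + (kArgsLength - 1) + argc.
  // New scheme (memory order): skip the implicit slots plus the receiver and
  // land on the first argument, independent of argc.
  intptr_t* values = implicit_args + kArgsLength + 1;
  assert(values[0] == 100 && values[kArgc - 1] == 102);
  return 0;
}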
@@ -3737,6 +3365,242 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ stop();
}
+namespace {
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Save all the registers onto the stack
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+
+ // Save all double registers before messing with them.
+ __ lay(sp, MemOperand(sp, -kDoubleRegsSize));
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister dreg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ StoreDouble(dreg, MemOperand(sp, offset));
+ }
+
+ // Push all GPRs onto the stack
+ __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kSystemPointerSize));
+ __ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
+
+ __ Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ isolate));
+ __ StoreP(fp, MemOperand(r1));
+
+ static constexpr int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
+
+ __ lgfi(r4, Operand(Deoptimizer::kFixedExitSizeMarker));
+ // Cleanse the return address for 31-bit mode.
+ __ CleanseP(r14);
+ // Get the address of the location in the code object (r5)(return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register r6.
+ __ LoadRR(r5, r14);
+ __ la(r6, MemOperand(sp, kSavedRegistersAreaSize));
+ __ SubP(r6, fp, r6);
+
+ // Allocate a new deoptimizer object.
+ // Pass six arguments in r2 to r7.
+ __ PrepareCallCFunction(6, r7);
+ __ LoadImmP(r2, Operand::Zero());
+ Label context_check;
+ __ LoadP(r3, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(r3, &context_check);
+ __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ LoadImmP(r3, Operand(static_cast<int>(deopt_kind)));
+ // r4: bailout id already loaded.
+ // r5: code address or 0 already loaded.
+ // r6: Fp-to-sp delta.
+ // Parameter 6: isolate is passed on the stack.
+ __ Move(r7, ExternalReference::isolate_address(isolate));
+ __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
+
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve "deoptimizer" object in register r2 and get the input
+ // frame descriptor pointer to r3 (deoptimizer->input_);
+ __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ // TODO(john.yan): optimize the following loop by using the mvc instruction,
+ // e.g. __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
+ //             MemOperand(sp), kNumberOfRegisters * kSystemPointerSize);
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ LoadP(r4, MemOperand(sp, i * kSystemPointerSize));
+ __ StoreP(r4, MemOperand(r3, offset));
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Copy double registers to
+ // double_registers_[DoubleRegister::kNumRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset =
+ code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
+ // TODO(joransiu): MVC opportunity
+ __ LoadDouble(d0, MemOperand(sp, src_offset));
+ __ StoreDouble(d0, MemOperand(r3, dst_offset));
+ }
+
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register zero = r6;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ lhi(zero, Operand(0));
+ __ StoreByte(zero, MemOperand(is_iterable));
+ }
+
+ // Remove the saved registers from the stack.
+ __ la(sp, MemOperand(sp, kSavedRegistersAreaSize));
+
+ // Compute a pointer to the unwinding limit in register r4; that is
+ // the first stack slot not part of the input frame.
+ __ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
+ __ AddP(r4, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ b(&pop_loop_header, Label::kNear);
+ __ bind(&pop_loop);
+ __ pop(r6);
+ __ StoreP(r6, MemOperand(r5, 0));
+ __ la(r5, MemOperand(r5, kSystemPointerSize));
+ __ bind(&pop_loop_header);
+ __ CmpP(r4, sp);
+ __ bne(&pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(r2); // Preserve deoptimizer object across call.
+ // r2: deoptimizer object; r3: scratch.
+ __ PrepareCallCFunction(1, r3);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ pop(r2); // Restore deoptimizer object (class Deoptimizer).
+
+ __ LoadP(sp, MemOperand(r2, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: r6 = current "FrameDescription** output_",
+ // r3 = one past the last FrameDescription**.
+ __ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
+ __ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
+ __ ShiftLeftP(r3, r3, Operand(kSystemPointerSizeLog2));
+ __ AddP(r3, r6, r3);
+ __ b(&outer_loop_header, Label::kNear);
+
+ __ bind(&outer_push_loop);
+ // Inner loop state: r4 = current FrameDescription*, r5 = loop index.
+ __ LoadP(r4, MemOperand(r6, 0)); // output_[ix]
+ __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
+ __ b(&inner_loop_header, Label::kNear);
+
+ __ bind(&inner_push_loop);
+ __ SubP(r5, Operand(sizeof(intptr_t)));
+ __ AddP(r8, r4, r5);
+ __ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
+ __ push(r8);
+
+ __ bind(&inner_loop_header);
+ __ CmpP(r5, Operand::Zero());
+ __ bne(&inner_push_loop); // test for gt?
+
+ __ AddP(r6, r6, Operand(kSystemPointerSize));
+ __ bind(&outer_loop_header);
+ __ CmpP(r6, r3);
+ __ blt(&outer_push_loop);
+
+ __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister dreg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ ld(dreg, MemOperand(r3, src_offset));
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset()));
+ __ push(r8);
+ __ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset()));
+ __ push(r8);
+
+ // Restore the registers from the last output frame.
+ __ LoadRR(r1, r4);
+ for (int i = kNumberOfRegisters - 1; i > 0; i--) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ LoadP(ToRegister(i), MemOperand(r1, offset));
+ }
+ }
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register one = r6;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ lhi(one, Operand(1));
+ __ StoreByte(one, MemOperand(is_iterable));
+ }
+
+ __ pop(ip); // get continuation, leave pc on stack
+ __ pop(r14);
+ __ Jump(ip);
+
+ __ stop();
+}
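At a high level the new entry spills every register, snapshots them and the live stack into the input FrameDescription, lets Deoptimizer::ComputeOutputFrames build replacement frames in C++, then replays those frames onto the machine stack and restores registers from the last one. A condensed, host-only C++ sketch of that data flow with simplified stand-in types; nothing below is the real Deoptimizer API:

#include <cstdint>
#include <iostream>
#include <vector>

constexpr int kNumRegisters = 16;

// Simplified stand-in for v8::internal::FrameDescription.
struct FrameDescription {
  intptr_t registers[kNumRegisters] = {};
  std::vector<intptr_t> content;  // frame slots, top of frame first
};

int main() {
  // 1. "Spill" registers and the live stack into the input description.
  intptr_t machine_regs[kNumRegisters];
  for (int i = 0; i < kNumRegisters; ++i) machine_regs[i] = i * 10;
  std::vector<intptr_t> machine_stack = {111, 222, 333};

  FrameDescription input;
  for (int i = 0; i < kNumRegisters; ++i) input.registers[i] = machine_regs[i];
  input.content = machine_stack;

  // 2. Pretend ComputeOutputFrames produced one output frame.
  FrameDescription output;
  output.content = {444, 555};
  output.registers[2] = 999;  // e.g. a rewritten value destined for r2

  // 3. Replay the output frame onto the "stack" and restore registers from it.
  machine_stack = output.content;
  for (int i = 0; i < kNumRegisters; ++i) machine_regs[i] = output.registers[i];

  std::cout << "top of new frame: " << machine_stack.front()
            << ", r2: " << machine_regs[2] << '\n';
}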
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index 2d7e93c9bb..baf64f7fa7 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -38,8 +38,7 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
CHECK(!options.use_pc_relative_calls_and_jumps);
CHECK(!options.collect_win64_unwind_info);
- if (!isolate->IsGeneratingEmbeddedBuiltins() ||
- !Builtins::IsIsolateIndependent(builtin_index)) {
+ if (!isolate->IsGeneratingEmbeddedBuiltins()) {
return options;
}
diff --git a/deps/v8/src/builtins/string-trim.tq b/deps/v8/src/builtins/string-trim.tq
new file mode 100644
index 0000000000..eef0ccd84f
--- /dev/null
+++ b/deps/v8/src/builtins/string-trim.tq
@@ -0,0 +1,168 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-string-gen.h'
+
+namespace string {
+
+extern enum TrimMode extends uint31 constexpr 'String::TrimMode' {
+ kTrim,
+ kTrimStart,
+ kTrimEnd
+}
+
+@export
+macro IsWhiteSpaceOrLineTerminator(charCode: int32): bool {
+ // 0x0020 - SPACE (Intentionally out of order to fast path a common case)
+ if (charCode == Int32Constant(0x0020)) {
+ return true;
+ }
+
+ // 0x0009 - HORIZONTAL TAB
+ if (charCode < Int32Constant(0x0009)) {
+ return false;
+ }
+ // 0x000A - LINE FEED OR NEW LINE
+ // 0x000B - VERTICAL TAB
+ // 0x000C - FORMFEED
+ // 0x000D - CARRIAGE RETURN
+ if (charCode <= Int32Constant(0x000D)) {
+ return true;
+ }
+
+ // Common Non-whitespace characters
+ if (charCode < Int32Constant(0x00A0)) {
+ return false;
+ }
+
+ // 0x00A0 - NO-BREAK SPACE
+ if (charCode == Int32Constant(0x00A0)) {
+ return true;
+ }
+
+ // 0x1680 - Ogham Space Mark
+ if (charCode == Int32Constant(0x1680)) {
+ return true;
+ }
+
+ // 0x2000 - EN QUAD
+ if (charCode < Int32Constant(0x2000)) {
+ return false;
+ }
+ // 0x2001 - EM QUAD
+ // 0x2002 - EN SPACE
+ // 0x2003 - EM SPACE
+ // 0x2004 - THREE-PER-EM SPACE
+ // 0x2005 - FOUR-PER-EM SPACE
+ // 0x2006 - SIX-PER-EM SPACE
+ // 0x2007 - FIGURE SPACE
+ // 0x2008 - PUNCTUATION SPACE
+ // 0x2009 - THIN SPACE
+ // 0x200A - HAIR SPACE
+ if (charCode <= Int32Constant(0x200A)) {
+ return true;
+ }
+
+ // 0x2028 - LINE SEPARATOR
+ if (charCode == Int32Constant(0x2028)) {
+ return true;
+ }
+ // 0x2029 - PARAGRAPH SEPARATOR
+ if (charCode == Int32Constant(0x2029)) {
+ return true;
+ }
+ // 0x202F - NARROW NO-BREAK SPACE
+ if (charCode == Int32Constant(0x202F)) {
+ return true;
+ }
+ // 0x205F - MEDIUM MATHEMATICAL SPACE
+ if (charCode == Int32Constant(0x205F)) {
+ return true;
+ }
+ // 0xFEFF - BYTE ORDER MARK
+ if (charCode == Int32Constant(0xFEFF)) {
+ return true;
+ }
+ // 0x3000 - IDEOGRAPHIC SPACE
+ if (charCode == Int32Constant(0x3000)) {
+ return true;
+ }
+
+ return false;
+}
+
+transitioning macro StringTrim(implicit context: Context)(
+ receiver: JSAny, _arguments: Arguments, methodName: constexpr string,
+ variant: constexpr TrimMode): String {
+ const receiverString: String = ToThisString(receiver, methodName);
+ const stringLength: intptr = receiverString.length_intptr;
+
+ const directString = Cast<DirectString>(receiverString)
+ otherwise return runtime::StringTrim(
+ receiverString, SmiTag<TrimMode>(variant));
+
+ let startIndex: intptr = 0;
+ let endIndex: intptr = stringLength - 1;
+
+ // TODO(duongn): It would probably be more efficient to turn StringTrim into a
+ // template for the different string types and specialize the loop for them.
+ if (variant == TrimMode::kTrim || variant == TrimMode::kTrimStart) {
+ while (true) {
+ if (startIndex == stringLength) {
+ return EmptyStringConstant();
+ }
+ if (!IsWhiteSpaceOrLineTerminator(
+ StringCharCodeAt(directString, Unsigned(startIndex)))) {
+ break;
+ }
+ startIndex++;
+ }
+ }
+
+ if (variant == TrimMode::kTrim || variant == TrimMode::kTrimEnd) {
+ while (true) {
+ if (endIndex == -1) {
+ return EmptyStringConstant();
+ }
+ if (!IsWhiteSpaceOrLineTerminator(
+ StringCharCodeAt(directString, Unsigned(endIndex)))) {
+ break;
+ }
+ endIndex--;
+ }
+ }
+
+ return SubString(
+ receiverString, Unsigned(startIndex), Unsigned(endIndex + 1));
+}
+
+// ES6 #sec-string.prototype.trim
+transitioning javascript builtin
+StringPrototypeTrim(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ const methodName: constexpr string = 'String.prototype.trim';
+ return StringTrim(receiver, arguments, methodName, TrimMode::kTrim);
+}
+
+// https://github.com/tc39/proposal-string-left-right-trim
+transitioning javascript builtin
+StringPrototypeTrimStart(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ const methodName: constexpr string = 'String.prototype.trimLeft';
+ return StringTrim(receiver, arguments, methodName, TrimMode::kTrimStart);
+}
+
+// https://github.com/tc39/proposal-string-left-right-trim
+transitioning javascript builtin
+StringPrototypeTrimEnd(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ const methodName: constexpr string = 'String.prototype.trimRight';
+ return StringTrim(receiver, arguments, methodName, TrimMode::kTrimEnd);
+}
+}
+
+namespace runtime {
+extern runtime StringTrim(implicit context: Context)(
+ String, SmiTagged<string::TrimMode>): String;
+}
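The Torque code above is a two-ended scan: advance startIndex while the leading character is JS whitespace, retreat endIndex while the trailing character is, then take the substring in between. A plain C++ sketch of the same shape, with a deliberately abbreviated ASCII-only whitespace predicate standing in for IsWhiteSpaceOrLineTerminator:

#include <iostream>
#include <string>

// Abbreviated, ASCII-only stand-in for IsWhiteSpaceOrLineTerminator.
bool IsJsWhitespaceAscii(unsigned char c) {
  return c == 0x20 || (c >= 0x09 && c <= 0x0D);
}

enum class TrimMode { kTrim, kTrimStart, kTrimEnd };

std::string Trim(const std::string& s, TrimMode mode) {
  size_t start = 0;
  size_t end = s.size();
  if (mode != TrimMode::kTrimEnd) {
    while (start < end && IsJsWhitespaceAscii(s[start])) ++start;
  }
  if (mode != TrimMode::kTrimStart) {
    while (end > start && IsJsWhitespaceAscii(s[end - 1])) --end;
  }
  return s.substr(start, end - start);
}

int main() {
  std::cout << '[' << Trim(" \t hello \n ", TrimMode::kTrim) << "]\n";  // [hello]
  std::cout << '[' << Trim("  hi  ", TrimMode::kTrimStart) << "]\n";    // [hi  ]
  std::cout << '[' << Trim("  hi  ", TrimMode::kTrimEnd) << "]\n";      // [  hi]
}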
diff --git a/deps/v8/src/builtins/torque-internal.tq b/deps/v8/src/builtins/torque-internal.tq
index 254663039c..28636bdbfe 100644
--- a/deps/v8/src/builtins/torque-internal.tq
+++ b/deps/v8/src/builtins/torque-internal.tq
@@ -226,6 +226,11 @@ macro DownCastForTorqueClass<T : type extends HeapObject>(o: HeapObject):
extern macro StaticAssert(bool, constexpr string);
+// This is for the implementation of the dot operator. In any context where the
+// dot operator is available, the correct way to get the length of an indexed
+// field x from object o is `(&o.x).length`.
+intrinsic %IndexedFieldLength<T: type>(o: T, f: constexpr string);
+
} // namespace torque_internal
// Indicates that an array-field should not be initialized.
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index ec51efc00a..6e416ddd98 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -53,6 +53,7 @@ transitioning macro AllocateTypedArray(implicit context: Context)(
typedArray.byte_offset = byteOffset;
typedArray.byte_length = byteLength;
typedArray.length = length;
+ typed_array::AllocateJSTypedArrayExternalPointerEntry(typedArray);
if constexpr (isOnHeap) {
typed_array::SetJSTypedArrayOnHeapDataPtr(typedArray, elements, byteOffset);
} else {
diff --git a/deps/v8/src/builtins/typed-array-sort.tq b/deps/v8/src/builtins/typed-array-sort.tq
index c32808038d..614852f444 100644
--- a/deps/v8/src/builtins/typed-array-sort.tq
+++ b/deps/v8/src/builtins/typed-array-sort.tq
@@ -114,6 +114,14 @@ transitioning javascript builtin TypedArrayPrototypeSort(
return TypedArraySortFast(context, obj);
}
+ // Throw rather than crash if the TypedArray's size exceeds max FixedArray
+ // size (which we need below).
+ // TODO(4153): Consider redesigning the sort implementation such that we
+ // don't have such a limit.
+ if (len > kFixedArrayMaxLength) {
+ ThrowTypeError(MessageTemplate::kTypedArrayTooLargeToSort);
+ }
+
const comparefn: Callable =
Cast<Callable>(comparefnObj) otherwise unreachable;
const accessor: TypedArrayAccessor =
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index ca18b432ab..d8fc788dfb 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -157,6 +157,10 @@ macro GetTypedArrayAccessor(elementsKind: ElementsKind): TypedArrayAccessor {
unreachable;
}
+extern macro
+TypedArrayBuiltinsAssembler::AllocateJSTypedArrayExternalPointerEntry(
+ JSTypedArray): void;
+
extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
JSTypedArray, ByteArray, uintptr): void;
extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq
index fda048518a..411bb0c41e 100644
--- a/deps/v8/src/builtins/wasm.tq
+++ b/deps/v8/src/builtins/wasm.tq
@@ -282,24 +282,32 @@ builtin WasmUint32ToNumber(value: uint32): Number {
return ChangeUint32ToTagged(value);
}
+builtin UintPtr53ToNumber(value: uintptr): Number {
+ if (value <= kSmiMaxValue) return Convert<Smi>(Convert<intptr>(value));
+ const valueFloat = ChangeUintPtrToFloat64(value);
+ // Values need to be within [0..2^53], such that they can be represented as
+ // float64.
+ assert(ChangeFloat64ToUintPtr(valueFloat) == value);
+ return AllocateHeapNumberWithValue(valueFloat);
+}
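The assert in UintPtr53ToNumber is a round-trip check: the value must survive uintptr -> float64 -> uintptr unchanged, which for a 64-bit uintptr means it must not exceed 2^53. A small stand-alone C++ check of the same property (only the representability test is demonstrated, not the Smi/HeapNumber allocation):

#include <cstdint>
#include <iostream>

// True iff |value| round-trips through float64 without loss, i.e. it is
// exactly representable as an IEEE-754 double.
bool FitsInFloat64(uint64_t value) {
  double as_double = static_cast<double>(value);
  // Guard first: a double >= 2^64 (e.g. from UINT64_MAX) cannot be cast back.
  return as_double < 0x1p64 && static_cast<uint64_t>(as_double) == value;
}

int main() {
  const uint64_t k2Pow53 = uint64_t{1} << 53;
  std::cout << FitsInFloat64(k2Pow53 - 1) << '\n';  // 1
  std::cout << FitsInFloat64(k2Pow53) << '\n';      // 1: 2^53 is exact
  std::cout << FitsInFloat64(k2Pow53 + 1) << '\n';  // 0: rounds to 2^53
}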
+
extern builtin I64ToBigInt(intptr): BigInt;
-builtin WasmAtomicNotify(address: uint32, count: uint32): uint32 {
+builtin WasmAtomicNotify(offset: uintptr, count: uint32): uint32 {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const result: Smi = runtime::WasmAtomicNotify(
- LoadContextFromInstance(instance), instance, WasmUint32ToNumber(address),
+ LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
WasmUint32ToNumber(count));
return Unsigned(SmiToInt32(result));
}
builtin WasmI32AtomicWait64(
- address: uint32, expectedValue: int32, timeout: intptr): uint32 {
+ offset: uintptr, expectedValue: int32, timeout: intptr): uint32 {
if constexpr (Is64()) {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const result: Smi = runtime::WasmI32AtomicWait(
- LoadContextFromInstance(instance), instance,
- WasmUint32ToNumber(address), WasmInt32ToNumber(expectedValue),
- I64ToBigInt(timeout));
+ LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
+ WasmInt32ToNumber(expectedValue), I64ToBigInt(timeout));
return Unsigned(SmiToInt32(result));
} else {
unreachable;
@@ -307,13 +315,12 @@ builtin WasmI32AtomicWait64(
}
builtin WasmI64AtomicWait64(
- address: uint32, expectedValue: intptr, timeout: intptr): uint32 {
+ offset: uintptr, expectedValue: intptr, timeout: intptr): uint32 {
if constexpr (Is64()) {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const result: Smi = runtime::WasmI64AtomicWait(
- LoadContextFromInstance(instance), instance,
- WasmUint32ToNumber(address), I64ToBigInt(expectedValue),
- I64ToBigInt(timeout));
+ LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
+ I64ToBigInt(expectedValue), I64ToBigInt(timeout));
return Unsigned(SmiToInt32(result));
} else {
unreachable;
@@ -385,10 +392,6 @@ builtin ThrowWasmTrapFloatUnrepresentable(): JSAny {
tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapFloatUnrepresentable));
}
-builtin ThrowWasmTrapFuncInvalid(): JSAny {
- tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapFuncInvalid));
-}
-
builtin ThrowWasmTrapFuncSigMismatch(): JSAny {
tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapFuncSigMismatch));
}
@@ -424,8 +427,4 @@ builtin ThrowWasmTrapIllegalCast(): JSAny {
builtin ThrowWasmTrapArrayOutOfBounds(): JSAny {
tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapArrayOutOfBounds));
}
-
-builtin ThrowWasmTrapWasmJSFunction(): JSAny {
- tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapWasmJSFunction));
-}
}
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index b94817f6f5..f7eb4658d5 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -26,6 +26,7 @@
#include "src/objects/smi.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/object-access.h"
+#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
@@ -74,43 +75,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
-
-Operand StackLimitAsOperand(MacroAssembler* masm, StackLimitKind kind) {
- DCHECK(masm->root_array_available());
- Isolate* isolate = masm->isolate();
- ExternalReference limit =
- kind == StackLimitKind::kRealStackLimit
- ? ExternalReference::address_of_real_jslimit(isolate)
- : ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
-
- intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
- CHECK(is_int32(offset));
- return Operand(kRootRegister, static_cast<int32_t>(offset));
-}
-
-void Generate_StackOverflowCheck(
- MacroAssembler* masm, Register num_args, Register scratch,
- Label* stack_overflow,
- Label::Distance stack_overflow_distance = Label::kFar) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ movq(kScratchRegister,
- StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
- __ movq(scratch, rsp);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ subq(scratch, kScratchRegister);
- __ sarq(scratch, Immediate(kSystemPointerSizeLog2));
- // Check if the arguments will overflow the stack.
- __ cmpq(scratch, num_args);
- // Signed comparison.
- __ j(less_equal, stack_overflow, stack_overflow_distance);
-}
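The deleted helper (its callers now use the __ StackOverflowCheck macro-assembler method instead) measures how many pointer-sized slots remain between rsp and the real stack limit and reports overflow when that count is not strictly greater than the argument count; the signed comparison also catches an rsp that is already below the limit. A host-side C++ sketch of just that arithmetic, with made-up addresses:

#include <cstdint>
#include <iostream>

constexpr int kSystemPointerSizeLog2 = 3;  // 8-byte slots on x64

// Returns true if pushing |num_args| slots would cross |real_stack_limit|.
bool WouldOverflowStack(uintptr_t sp, uintptr_t real_stack_limit,
                        int64_t num_args) {
  // Space left, in slots; may come out negative if sp is already past the
  // limit, which is why the comparison below is signed.
  int64_t slots_left =
      static_cast<int64_t>(sp - real_stack_limit) >> kSystemPointerSizeLog2;
  return slots_left <= num_args;
}

int main() {
  const uintptr_t limit = 0x1000;
  std::cout << WouldOverflowStack(0x1100, limit, 10) << '\n';  // 0: 32 slots left
  std::cout << WouldOverflowStack(0x1100, limit, 40) << '\n';  // 1: not enough room
  std::cout << WouldOverflowStack(0x0F00, limit, 1) << '\n';   // 1: already below limit
}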
-
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax: number of arguments
@@ -120,7 +84,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -----------------------------------
Label stack_overflow;
- Generate_StackOverflowCheck(masm, rax, rcx, &stack_overflow, Label::kFar);
+ __ StackOverflowCheck(rax, rcx, &stack_overflow, Label::kFar);
// Enter a construct frame.
{
@@ -136,7 +100,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// correct position (including any undefined), instead of delaying this to
// InvokeFunction.
-#ifdef V8_REVERSE_JSARGS
// Set up pointer to first argument (skip receiver).
__ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
@@ -144,14 +107,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushArray(rbx, rax, rcx);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-#else
- // The receiver for the builtin/api call.
- __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
- // Copy arguments to the expression stack.
- __ PushArray(rbx, rax, rcx);
-#endif
// Call the function.
// rax: number of arguments (untagged)
@@ -159,8 +114,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// rdx: new target
__ InvokeFunction(rdi, rdx, rax, CALL_FUNCTION);
- // Restore context from the frame.
- __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
__ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
@@ -195,176 +148,161 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
+ FrameScope scope(masm, StackFrame::MANUAL);
// Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
- Label post_instantiation_deopt_entry, not_create_implicit_receiver;
-
- // Preserve the incoming parameters on the stack.
- __ SmiTag(rcx, rax);
- __ Push(rsi);
- __ Push(rcx);
- __ Push(rdi);
- __ PushRoot(RootIndex::kTheHoleValue);
- __ Push(rdx);
-
- // ----------- S t a t e -------------
- // -- sp[0*kSystemPointerSize]: new target
- // -- sp[1*kSystemPointerSize]: padding
- // -- rdi and sp[2*kSystemPointerSize]: constructor function
- // -- sp[3*kSystemPointerSize]: argument count
- // -- sp[4*kSystemPointerSize]: context
- // -----------------------------------
-
- __ LoadTaggedPointerField(
- rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movl(rbx, FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset));
- __ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
- __ JumpIfIsInRange(rbx, kDefaultDerivedConstructor, kDerivedConstructor,
- &not_create_implicit_receiver, Label::kNear);
-
- // If not derived class constructor: Allocate the new receiver object.
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ jmp(&post_instantiation_deopt_entry, Label::kNear);
-
- // Else: use TheHoleValue as receiver for constructor call
- __ bind(&not_create_implicit_receiver);
- __ LoadRoot(rax, RootIndex::kTheHoleValue);
-
- // ----------- S t a t e -------------
- // -- rax implicit receiver
- // -- Slot 4 / sp[0*kSystemPointerSize] new target
- // -- Slot 3 / sp[1*kSystemPointerSize] padding
- // -- Slot 2 / sp[2*kSystemPointerSize] constructor function
- // -- Slot 1 / sp[3*kSystemPointerSize] number of arguments (tagged)
- // -- Slot 0 / sp[4*kSystemPointerSize] context
- // -----------------------------------
- // Deoptimizer enters here.
- masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
- masm->pc_offset());
- __ bind(&post_instantiation_deopt_entry);
-
- // Restore new target.
- __ Pop(rdx);
-
- // Push the allocated receiver to the stack.
- __ Push(rax);
-
-#ifdef V8_REVERSE_JSARGS
- // We need two copies because we may have to return the original one
- // and the calling conventions dictate that the called function pops the
- // receiver. The second copy is pushed after the arguments, we saved in r8
- // since rax needs to store the number of arguments before
- // InvokingFunction.
- __ movq(r8, rax);
-
- // Set up pointer to first argument (skip receiver).
- __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
- kSystemPointerSize));
-#else
- // We need two copies because we may have to return the original one
- // and the calling conventions dictate that the called function pops the
- // receiver.
- __ Push(rax);
-
- // Set up pointer to last argument.
- __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-#endif
-
- // Restore constructor function and argument count.
- __ movq(rdi, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
- __ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset));
-
- // Check if we have enough stack space to push all arguments.
- // Argument count in rax. Clobbers rcx.
- Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, rax, rcx, &stack_overflow, Label::kNear);
- __ jmp(&enough_stack_space, Label::kNear);
-
- __ bind(&stack_overflow);
- // Restore context from the frame.
- __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kThrowStackOverflow);
- // This should be unreachable.
- __ int3();
-
- __ bind(&enough_stack_space);
-
- // TODO(victorgomes): When the arguments adaptor is completely removed, we
- // should get the formal parameter count and copy the arguments in its
- // correct position (including any undefined), instead of delaying this to
- // InvokeFunction.
-
- // Copy arguments to the expression stack.
- __ PushArray(rbx, rax, rcx);
+ __ EnterFrame(StackFrame::CONSTRUCT);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
-#ifdef V8_REVERSE_JSARGS
- // Push implicit receiver.
- __ Push(r8);
-#endif
-
- // Call the function.
- __ InvokeFunction(rdi, rdx, rax, CALL_FUNCTION);
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(rcx, rax);
+ __ Push(rsi);
+ __ Push(rcx);
+ __ Push(rdi);
+ __ PushRoot(RootIndex::kTheHoleValue);
+ __ Push(rdx);
- // ----------- S t a t e -------------
- // -- rax constructor result
- // -- sp[0*kSystemPointerSize] implicit receiver
- // -- sp[1*kSystemPointerSize] padding
- // -- sp[2*kSystemPointerSize] constructor function
- // -- sp[3*kSystemPointerSize] number of arguments
- // -- sp[4*kSystemPointerSize] context
- // -----------------------------------
+ // ----------- S t a t e -------------
+ // -- sp[0*kSystemPointerSize]: new target
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- rdi and sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: argument count
+ // -- sp[4*kSystemPointerSize]: context
+ // -----------------------------------
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
+ __ LoadTaggedPointerField(
+ rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movl(rbx, FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
+ __ JumpIfIsInRange(rbx, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver, Label::kNear);
- // Restore context from the frame.
- __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
+ __ jmp(&post_instantiation_deopt_entry, Label::kNear);
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, do_throw, leave_frame;
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(rax, RootIndex::kTheHoleValue);
- // If the result is undefined, we jump out to using the implicit receiver.
- __ JumpIfRoot(rax, RootIndex::kUndefinedValue, &use_receiver, Label::kNear);
+ // ----------- S t a t e -------------
+ // -- rax implicit receiver
+ // -- Slot 4 / sp[0*kSystemPointerSize] new target
+ // -- Slot 3 / sp[1*kSystemPointerSize] padding
+ // -- Slot 2 / sp[2*kSystemPointerSize] constructor function
+ // -- Slot 1 / sp[3*kSystemPointerSize] number of arguments (tagged)
+ // -- Slot 0 / sp[4*kSystemPointerSize] context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(rdx);
+
+ // Push the allocated receiver to the stack.
+ __ Push(rax);
+
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+ // receiver. The second copy is pushed after the arguments; we save it in r8
+ // since rax needs to hold the number of arguments before InvokeFunction is
+ // called.
+ __ movq(r8, rax);
+
+ // Set up pointer to first argument (skip receiver).
+ __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
+ kSystemPointerSize));
+
+ // Restore constructor function and argument count.
+ __ movq(rdi, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
+ __ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset));
+
+ // Check if we have enough stack space to push all arguments.
+ // Argument count in rax. Clobbers rcx.
+ Label stack_overflow;
+ __ StackOverflowCheck(rax, rcx, &stack_overflow);
- // Otherwise we do a smi check and fall through to check if the return value
- // is a valid receiver.
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments in its
+ // correct position (including any undefined), instead of delaying this to
+ // InvokeFunction.
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(rax, &use_receiver, Label::kNear);
+ // Copy arguments to the expression stack.
+ __ PushArray(rbx, rax, rcx);
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
- __ j(above_equal, &leave_frame, Label::kNear);
- __ jmp(&use_receiver, Label::kNear);
+ // Push implicit receiver.
+ __ Push(r8);
- __ bind(&do_throw);
- __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ // Call the function.
+ __ InvokeFunction(rdi, rdx, rax, CALL_FUNCTION);
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ movq(rax, Operand(rsp, 0 * kSystemPointerSize));
- __ JumpIfRoot(rax, RootIndex::kTheHoleValue, &do_throw, Label::kNear);
+ // ----------- S t a t e -------------
+ // -- rax constructor result
+ // -- sp[0*kSystemPointerSize] implicit receiver
+ // -- sp[1*kSystemPointerSize] padding
+ // -- sp[2*kSystemPointerSize] constructor function
+ // -- sp[3*kSystemPointerSize] number of arguments
+ // -- sp[4*kSystemPointerSize] context
+ // -----------------------------------
- __ bind(&leave_frame);
- // Restore the arguments count.
- __ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
- // Leave construct frame.
- }
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, leave_and_return, check_result;
+
+ // If the result is undefined, we'll use the implicit receiver. Otherwise we
+ // do a smi check and fall through to check if the return value is a valid
+ // receiver.
+ __ JumpIfNotRoot(rax, RootIndex::kUndefinedValue, &check_result,
+ Label::kNear);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ movq(rax, Operand(rsp, 0 * kSystemPointerSize));
+ __ JumpIfRoot(rax, RootIndex::kTheHoleValue, &do_throw, Label::kNear);
+
+ __ bind(&leave_and_return);
+ // Restore the arguments count.
+ __ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
+ __ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
__ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
__ PushReturnAddressFrom(rcx);
__ ret(0);
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ bind(&check_result);
+ __ JumpIfSmi(rax, &use_receiver, Label::kNear);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
+ __ j(above_equal, &leave_and_return, Label::kNear);
+ __ jmp(&use_receiver);
+
+ __ bind(&do_throw);
+ // Restore context from the frame.
+ __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ // We don't return here.
+ __ int3();
+
+ __ bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // This should be unreachable.
+ __ int3();
}
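The tail of the rewritten stub encodes the [[Construct]] result rule referenced in the comments: an object returned by the constructor replaces the implicit receiver, any other result falls back to the on-stack receiver, and finding the hole there means the constructor returned a non-object where none is allowed, so the stub throws. A compact C++ sketch of that decision using a tiny tagged struct instead of real V8 values:

#include <iostream>
#include <stdexcept>

// Minimal stand-ins for the cases the stub distinguishes.
enum class Kind { kUndefined, kSmi, kObject, kTheHole };
struct Value { Kind kind; };

Value ConstructResult(Value constructor_result, Value on_stack_receiver) {
  // An object result wins; everything else falls back to the receiver.
  if (constructor_result.kind == Kind::kObject) return constructor_result;
  if (on_stack_receiver.kind == Kind::kTheHole) {
    throw std::runtime_error("constructor returned non-object");  // do_throw
  }
  return on_stack_receiver;  // use_receiver
}

int main() {
  Value receiver{Kind::kObject};
  Value hole{Kind::kTheHole};
  // Undefined result: the implicit receiver is used.
  std::cout << (ConstructResult({Kind::kUndefined}, receiver).kind ==
                Kind::kObject) << '\n';
  // Object result: it replaces the receiver.
  std::cout << (ConstructResult({Kind::kObject}, receiver).kind ==
                Kind::kObject) << '\n';
  try {
    ConstructResult({Kind::kSmi}, hole);
  } catch (const std::runtime_error&) {
    std::cout << "throws when no valid receiver is available\n";
  }
}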
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
@@ -617,11 +555,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the function onto the stack.
__ Push(rdi);
-#ifndef V8_REVERSE_JSARGS
- // Push the receiver onto the stack.
- __ Push(arg_reg_4);
-#endif
-
#ifdef V8_TARGET_OS_WIN
// Load the previous frame pointer to access C arguments on stack
__ movq(kScratchRegister, Operand(rbp, 0));
@@ -632,30 +565,24 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Load the number of arguments and setup pointer to the arguments.
__ movq(rax, r8);
__ movq(rbx, r9);
-#ifdef V8_REVERSE_JSARGS
__ movq(r9, arg_reg_4); // Temporarily saving the receiver.
-#endif
#endif // V8_TARGET_OS_WIN
- // Current stack contents if V8_REVERSE_JSARGS:
+ // Current stack contents:
// [rsp + kSystemPointerSize] : Internal frame
// [rsp] : function
- // Current stack contents if not V8_REVERSE_JSARGS:
- // [rsp + 2 * kSystemPointerSize] : Internal frame
- // [rsp + kSystemPointerSize] : function
- // [rsp] : receiver
// Current register contents:
// rax : argc
// rbx : argv
// rsi : context
// rdi : function
// rdx : new.target
- // r9 : receiver, if V8_REVERSE_JSARGS
+ // r9 : receiver
// Check if we have enough stack space to push all arguments.
// Argument count in rax. Clobbers rcx.
Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, rax, rcx, &stack_overflow, Label::kNear);
+ __ StackOverflowCheck(rax, rcx, &stack_overflow, Label::kNear);
__ jmp(&enough_stack_space, Label::kNear);
__ bind(&stack_overflow);
@@ -668,7 +595,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Copy arguments to the stack in a loop.
// Register rbx points to array of pointers to handle locations.
// Push the values of these handles.
-#ifdef V8_REVERSE_JSARGS
Label loop, entry;
__ movq(rcx, rax);
__ jmp(&entry, Label::kNear);
@@ -681,18 +607,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the receiver.
__ Push(r9);
-#else
- Label loop, entry;
- __ Set(rcx, 0); // Set loop variable to 0.
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_system_pointer_size, 0));
- __ Push(Operand(kScratchRegister, 0)); // dereference handle
- __ addq(rcx, Immediate(1));
- __ bind(&entry);
- __ cmpq(rcx, rax);
- __ j(not_equal, &loop, Label::kNear);
-#endif
// Invoke the builtin code.
Handle<Code> builtin = is_construct
@@ -779,24 +693,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ cmpq(rsp, StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
+ __ cmpq(rsp, __ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
__ j(below, &stack_overflow);
// Pop return address.
__ PopReturnAddressTo(rax);
-#ifndef V8_REVERSE_JSARGS
- // Push receiver.
- __ PushTaggedPointerField(
- FieldOperand(rdx, JSGeneratorObject::kReceiverOffset), decompr_scratch1);
-#endif
-
// ----------- S t a t e -------------
// -- rax : return address
// -- rdx : the JSGeneratorObject to resume
// -- rdi : generator function
// -- rsi : generator context
- // -- rsp[0] : generator receiver, if V8_REVERSE_JSARGS is not set
// -----------------------------------
// Copy the function arguments from the generator object's register file.
@@ -809,7 +716,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
{
-#ifdef V8_REVERSE_JSARGS
{
Label done_loop, loop;
__ movq(r9, rcx);
@@ -829,21 +735,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushTaggedPointerField(
FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
decompr_scratch1);
-#else
- Label done_loop, loop;
- __ Set(r9, 0);
-
- __ bind(&loop);
- __ cmpl(r9, rcx);
- __ j(greater_equal, &done_loop, Label::kNear);
- __ PushTaggedAnyField(
- FieldOperand(rbx, r9, times_tagged_size, FixedArray::kHeaderSize),
- decompr_scratch1);
- __ addl(r9, Immediate(1));
- __ jmp(&loop);
-
- __ bind(&done_loop);
-#endif
}
// Underlying function needs to have bytecode available.
@@ -957,13 +848,13 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ PushReturnAddressFrom(return_pc);
}
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ SmiCompare(smi_entry, Smi::FromEnum(marker));
+ __ Cmp(actual_marker, expected_marker);
__ j(not_equal, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -994,12 +885,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
if (FLAG_debug_code) {
- __ SmiCompare(optimization_marker,
- Smi::FromEnum(OptimizationMarker::kInOptimizationQueue));
- __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
+ __ int3();
}
}
@@ -1014,15 +904,20 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register closure = rdi;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, &heal_optimized_code_slot);
+
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code;
__ LoadTaggedPointerField(
scratch1,
FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ testl(FieldOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &found_deoptimized_code);
+ __ j(not_zero, &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
@@ -1032,10 +927,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ Move(rcx, optimized_code_entry);
__ JumpCodeObject(rcx);
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ // Optimized code slot contains deoptimized code or code is cleared and
+ // optimized code marker isn't updated. Evict the code, update the marker
+ // and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1169,20 +1065,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &push_stack_frame);
- // Read off the optimized code slot in the feedback vector, and if there
- // is optimized code or an optimization marker, call that instead.
-
- Register optimized_code_entry = rcx;
-
- __ LoadAnyTaggedField(
- optimized_code_entry,
- FieldOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ // Read off the optimization state in the feedback vector.
+ Register optimization_state = rcx;
+ __ movl(optimization_state,
+ FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
- __ Cmp(optimized_code_entry, Smi::FromEnum(OptimizationMarker::kNone));
- __ j(not_equal, &optimized_code_slot_not_empty);
+ // Check if there is optimized code or an optimization marker that needs to
+ // be processed.
+ Label has_optimized_code_or_marker;
+ __ testl(
+ optimization_state,
+ Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ j(not_zero, &has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
@@ -1231,7 +1125,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ movq(rax, rsp);
__ subq(rax, rcx);
- __ cmpq(rax, StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
+ __ cmpq(rax, __ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
__ j(below, &stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
@@ -1263,7 +1157,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- __ cmpq(rsp, StackLimitAsOperand(masm, StackLimitKind::kInterruptStackLimit));
+ __ cmpq(rsp, __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
__ j(below, &stack_check_interrupt);
__ bind(&after_stack_check_interrupt);
@@ -1333,19 +1227,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ int3(); // Should not return.
- __ bind(&optimized_code_slot_not_empty);
+ __ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
- // Check if optimized code marker is actually a weak reference to the
- // optimized code as opposed to an optimization marker.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+
+ __ testl(
+ optimization_state,
+ Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ j(zero, &maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = optimization_state;
+ __ LoadAnyTaggedField(
+ optimized_code_entry,
+ FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15);
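The interpreter entry now reads one 32-bit flags word from the feedback vector, tests a combined mask first, and only then decides between processing an optimization marker and tail-calling (or healing) the cached optimized code. A schematic C++ decode of such a flags word; the bit positions and marker values below are invented for the illustration and do not reflect V8's actual FeedbackVector bit layout:

#include <cstdint>
#include <iostream>

// Invented layout: low 3 bits hold the optimization marker, bit 3 says
// "maybe has optimized code".  The real FeedbackVector bitfields differ.
constexpr uint32_t kMarkerMask = 0x7;
constexpr uint32_t kMaybeHasOptimizedCodeBit = 1u << 3;
constexpr uint32_t kHasOptimizedCodeOrMarkerMask =
    kMarkerMask | kMaybeHasOptimizedCodeBit;

enum class Marker { kNone = 0, kLogFirstExecution, kCompileOptimized,
                    kCompileOptimizedConcurrent };

void ProcessOptimizationState(uint32_t optimization_state) {
  if ((optimization_state & kHasOptimizedCodeOrMarkerMask) == 0) {
    std::cout << "nothing to do: run bytecode\n";
    return;
  }
  if (optimization_state & kMarkerMask) {
    std::cout << "process marker "
              << (optimization_state & kMarkerMask) << '\n';
    return;
  }
  std::cout << "load weak code slot and tail-call (or heal if cleared)\n";
}

int main() {
  ProcessOptimizationState(0);
  ProcessOptimizationState(static_cast<uint32_t>(Marker::kCompileOptimized));
  ProcessOptimizationState(kMaybeHasOptimizedCodeBit);
}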
__ bind(&stack_overflow);
@@ -1364,12 +1264,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Operand(start_address, scratch, times_system_pointer_size,
kSystemPointerSize));
// Push the arguments.
-#ifdef V8_REVERSE_JSARGS
__ PushArray(start_address, num_args, scratch,
TurboAssembler::PushArrayOrder::kReverse);
-#else
- __ PushArray(start_address, num_args, scratch);
-#endif
}
// static
@@ -1386,22 +1282,19 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -----------------------------------
Label stack_overflow;
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ decl(rax);
}
-#endif
__ leal(rcx, Operand(rax, 1)); // Add one for receiver.
// Add a stack check before pushing arguments.
- Generate_StackOverflowCheck(masm, rcx, rdx, &stack_overflow);
+ __ StackOverflowCheck(rcx, rdx, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Don't copy receiver.
__ decq(rcx);
@@ -1421,21 +1314,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// is below that.
__ movq(rbx, Operand(rbx, -kSystemPointerSize));
}
-#else
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(RootIndex::kUndefinedValue);
- __ decl(rcx); // Subtract one for receiver.
- }
-
- // rbx and rdx will be modified.
- Generate_InterpreterPushArgs(masm, rcx, rbx, rdx);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(rbx); // Pass the spread in a register
- __ decl(rax); // Subtract one for spread
- }
-#endif
// Call the target.
__ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
@@ -1473,12 +1351,11 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
Label stack_overflow;
// Add a stack check before pushing arguments.
- Generate_StackOverflowCheck(masm, rax, r8, &stack_overflow);
+ __ StackOverflowCheck(rax, r8, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ decl(rax);
@@ -1489,22 +1366,10 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Push slot for the receiver to be constructed.
__ Push(Immediate(0));
-#else
- // Push slot for the receiver to be constructed.
- __ Push(Immediate(0));
-
- // rcx and r8 will be modified.
- Generate_InterpreterPushArgs(masm, rax, rcx, r8);
-#endif
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
-#ifdef V8_REVERSE_JSARGS
// Pass the spread in the register rbx.
__ movq(rbx, Operand(rcx, -kSystemPointerSize));
-#else
- __ Pop(rbx); // Pass the spread in a register
- __ decl(rax); // Subtract one for spread
-#endif
// Push return address in preparation for the tail-call.
__ PushReturnAddressFrom(kScratchRegister);
} else {
@@ -1673,7 +1538,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
const RegisterConfiguration* config(RegisterConfiguration::Default());
int allocatable_register_count = config->num_allocatable_general_registers();
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
// kScratchRegister is not included in the allocateable registers.
__ movq(kScratchRegister, rax);
@@ -1686,15 +1550,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
BuiltinContinuationFrameConstants::kFixedFrameSize),
rax);
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ movq(
- Operand(rsp, config->num_allocatable_general_registers() *
- kSystemPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize),
- rax);
-#endif
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1703,7 +1558,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ SmiUntag(Register::from_code(code));
}
}
-#ifdef V8_REVERSE_JSARGS
if (with_result && java_script_builtin) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. rax contains the arguments count, the return value
@@ -1712,7 +1566,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
BuiltinContinuationFrameConstants::kFixedFrameSize),
kScratchRegister);
}
-#endif
__ movq(
rbp,
Operand(rsp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1770,10 +1623,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rsp[0] : return address
- // The order of args depends on V8_REVERSE_JSARGS
- // -- args[0] : receiver
- // -- args[1] : thisArg
- // -- args[2] : argArray
+ // -- rsp[1] : receiver
+ // -- rsp[2] : thisArg
+ // -- rsp[3] : argArray
// -----------------------------------
// 1. Load receiver into rdi, argArray into rbx (if present), remove all
@@ -1836,15 +1688,13 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
// rsp[0] : Return address
- // rsp[8] : Argument n
- // rsp[16] : Argument n-1
+ // rsp[8] : Argument 0 (receiver: callable to call)
+ // rsp[16] : Argument 1
// ...
- // rsp[8 * n] : Argument 1
- // rsp[8 * (n + 1)] : Argument 0 (receiver: callable to call)
- // NOTE: The order of args are reversed if V8_REVERSE_JSARGS
+ // rsp[8 * n] : Argument n-1
+ // rsp[8 * (n + 1)] : Argument n
// rax contains the number of arguments, n, not counting the receiver.
-#ifdef V8_REVERSE_JSARGS
// 1. Get the callable to call (passed as receiver) from the stack.
{
StackArgumentsAccessor args(rax);
@@ -1870,43 +1720,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ PushReturnAddressFrom(rbx);
__ decq(rax); // One fewer argument (first argument is new receiver).
-#else
- // 1. Make sure we have at least one argument.
- {
- Label done;
- __ testq(rax, rax);
- __ j(not_zero, &done, Label::kNear);
- __ PopReturnAddressTo(rbx);
- __ PushRoot(RootIndex::kUndefinedValue);
- __ PushReturnAddressFrom(rbx);
- __ incq(rax);
- __ bind(&done);
- }
-
- // 2. Get the callable to call (passed as receiver) from the stack.
- {
- StackArgumentsAccessor args(rax);
- __ movq(rdi, args.GetReceiverOperand());
- }
-
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- {
- Label loop;
- __ movq(rcx, rax);
- StackArgumentsAccessor args(rcx);
- __ bind(&loop);
- __ movq(rbx, args[1]);
- __ movq(args[0], rbx);
- __ decq(rcx);
- __ j(not_zero, &loop); // While non-zero.
- __ DropUnderReturnAddress(1, rbx); // Drop one slot under return address.
- __ decq(rax); // One fewer argument (first argument is new receiver).
- }
-#endif
-
- // 4. Call the callable.
+ // 5. Call the callable.
// Since we did not create a frame for Function.prototype.call() yet,
// we use a normal Call builtin here.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -1916,11 +1730,10 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rsp[0] : return address
- // The order of args depends on V8_REVERSE_JSARGS
- // -- args[0] : receiver
- // -- args[1] : target
- // -- args[2] : thisArgument
- // -- args[3] : argumentsList
+ // -- rsp[8] : receiver
+ // -- rsp[16] : target (if argc >= 1)
+ // -- rsp[24] : thisArgument (if argc >= 2)
+ // -- rsp[32] : argumentsList (if argc == 3)
// -----------------------------------
// 1. Load target into rdi (if present), argumentsList into rbx (if present),
@@ -1968,11 +1781,10 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rsp[0] : return address
- // The order of args depends on V8_REVERSE_JSARGS
- // -- args[0] : receiver
- // -- args[1] : target
- // -- args[2] : argumentsList
- // -- args[3] : new.target (optional)
+ // -- rsp[8] : receiver
+ // -- rsp[16] : target
+ // -- rsp[24] : argumentsList
+ // -- rsp[32] : new.target (optional)
// -----------------------------------
// 1. Load target into rdi (if present), argumentsList into rbx (if present),
@@ -2065,26 +1877,18 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- rdi : function (passed through to callee)
// -----------------------------------
- Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
+ Label dont_adapt_arguments, stack_overflow;
__ cmpq(rbx, Immediate(kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-#ifndef V8_REVERSE_JSARGS
- // This optimization is disabled when the arguments are reversed.
- __ testl(
- FieldOperand(rcx, SharedFunctionInfo::kFlagsOffset),
- Immediate(SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask));
- __ j(not_zero, &skip_adapt_arguments);
-#endif
-
// -------------------------------------------
// Adapt arguments.
// -------------------------------------------
{
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, rbx, rcx, &stack_overflow);
+ __ StackOverflowCheck(rbx, rcx, &stack_overflow);
Label under_application, over_application, invoke;
__ cmpq(rax, rbx);
@@ -2095,11 +1899,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
-#ifdef V8_REVERSE_JSARGS
__ leaq(r8, Operand(rbp, rbx, times_system_pointer_size, offset));
-#else
- __ leaq(r8, Operand(rbp, rax, times_system_pointer_size, offset));
-#endif
__ Set(rax, -1); // account for receiver
Label copy;
@@ -2115,7 +1915,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Too few parameters: Actual < expected.
__ bind(&under_application);
{
-#ifdef V8_REVERSE_JSARGS
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
@@ -2141,29 +1940,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Update actual number of arguments.
__ movq(rax, rbx);
-#else // !V8_REVERSE_JSARGS
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ leaq(r9, Operand(rbp, rax, times_system_pointer_size, offset));
- __ Set(r8, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ incq(r8);
- __ Push(Operand(r9, 0));
- __ subq(r9, Immediate(kSystemPointerSize));
- __ cmpq(r8, rax);
- __ j(less, &copy);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
- __ bind(&fill);
- __ incq(rax);
- __ Push(kScratchRegister);
- __ cmpq(rax, rbx);
- __ j(less, &fill);
-#endif // !V8_REVERSE_JSARGS
}
// Call the entry point.
@@ -2185,44 +1961,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
// -------------------------------------------
- // Skip adapt arguments.
- // -------------------------------------------
- __ bind(&skip_adapt_arguments);
- {
- // The callee cannot observe the actual arguments, so it's safe to just
- // pass the expected arguments by massaging the stack appropriately. See
- // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
- Label under_application, over_application, invoke;
- __ PopReturnAddressTo(rcx);
- __ cmpq(rax, rbx);
- __ j(less, &under_application, Label::kNear);
-
- __ bind(&over_application);
- {
- // Remove superfluous parameters from the stack.
- __ xchgq(rax, rbx);
- __ subq(rbx, rax);
- __ leaq(rsp, Operand(rsp, rbx, times_system_pointer_size, 0));
- __ jmp(&invoke, Label::kNear);
- }
-
- __ bind(&under_application);
- {
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
- __ bind(&fill);
- __ incq(rax);
- __ Push(kScratchRegister);
- __ cmpq(rax, rbx);
- __ j(less, &fill);
- }
-
- __ bind(&invoke);
- __ PushReturnAddressFrom(rcx);
- }
-
- // -------------------------------------------
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
@@ -2261,7 +1999,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ j(equal, &ok);
__ CmpInstanceType(map, FIXED_DOUBLE_ARRAY_TYPE);
__ j(not_equal, &fail);
- __ cmpl(rcx, Immediate(0));
+ __ Cmp(rcx, 0);
__ j(equal, &ok);
// Fall through.
__ bind(&fail);
@@ -2271,10 +2009,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
}
Label stack_overflow;
- Generate_StackOverflowCheck(masm, rcx, r8, &stack_overflow, Label::kNear);
+ __ StackOverflowCheck(rcx, r8, &stack_overflow, Label::kNear);
// Push additional arguments onto the stack.
-#ifdef V8_REVERSE_JSARGS
// Move the arguments already in the stack,
// including the receiver and the return address.
{
@@ -2321,30 +2058,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bind(&done);
__ addq(rax, current);
}
-#else // !V8_REVERSE_JSARGS
- {
- Register value = scratch;
- __ PopReturnAddressTo(r8);
- __ Set(r9, 0);
- Label done, push, loop;
- __ bind(&loop);
- __ cmpl(r9, rcx);
- __ j(equal, &done, Label::kNear);
- // Turn the hole into undefined as we go.
- __ LoadAnyTaggedField(value, FieldOperand(rbx, r9, times_tagged_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(value, RootIndex::kTheHoleValue);
- __ j(not_equal, &push, Label::kNear);
- __ LoadRoot(value, RootIndex::kUndefinedValue);
- __ bind(&push);
- __ Push(value);
- __ incl(r9);
- __ jmp(&loop);
- __ bind(&done);
- __ PushReturnAddressFrom(r8);
- __ addq(rax, r9);
- }
-#endif
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
@@ -2426,10 +2139,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -----------------------------------
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, r8, r12, &stack_overflow, Label::kNear);
+ __ StackOverflowCheck(r8, r12, &stack_overflow, Label::kNear);
// Forward the arguments from the caller frame.
-#ifdef V8_REVERSE_JSARGS
// Move the arguments already in the stack,
// including the receiver and the return address.
{
@@ -2476,21 +2188,6 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
kScratchRegister);
__ j(not_zero, &loop);
}
-#else
- {
- Label loop;
- __ addl(rax, r8);
- __ PopReturnAddressTo(rcx);
- __ bind(&loop);
- {
- __ decl(r8);
- __ Push(Operand(rbx, r8, times_system_pointer_size,
- kFPOnStackSize + kPCOnStackSize));
- __ j(not_zero, &loop);
- }
- __ PushReturnAddressFrom(rcx);
- }
-#endif
}
__ jmp(&stack_done, Label::kNear);
__ bind(&stack_overflow);
@@ -2652,7 +2349,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// We are not trying to catch interruptions (i.e. debug break and
// preemption) here, so check the "real stack limit".
__ cmpq(kScratchRegister,
- StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
+ __ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
__ j(above_equal, &done, Label::kNear);
{
FrameScope scope(masm, StackFrame::MANUAL);
@@ -2662,7 +2359,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Save Return Address and Receiver into registers.
__ Pop(r8);
__ Pop(r10);
@@ -2690,54 +2386,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Recover Receiver and Return Address.
__ Push(r10);
__ Push(r8);
-#else // !V8_REVERSE_JSARGS
- // Reserve stack space for the [[BoundArguments]].
- __ movq(kScratchRegister, rbx);
- __ AllocateStackSpace(kScratchRegister);
-
- // Adjust effective number of arguments to include return address.
- __ incl(rax);
-
- // Relocate arguments and return address down the stack.
- {
- Label loop;
- __ Set(rcx, 0);
- __ addq(rbx, rsp);
- __ bind(&loop);
- __ movq(kScratchRegister,
- Operand(rbx, rcx, times_system_pointer_size, 0));
- __ movq(Operand(rsp, rcx, times_system_pointer_size, 0),
- kScratchRegister);
- __ incl(rcx);
- __ cmpl(rcx, rax);
- __ j(less, &loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop;
- __ LoadTaggedPointerField(
- rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
- __ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Instead of doing decl(rbx) here subtract kTaggedSize from the header
- // offset in order be able to move decl(rbx) right before the loop
- // condition. This is necessary in order to avoid flags corruption by
- // pointer decompression code.
- __ LoadAnyTaggedField(
- r12, FieldOperand(rcx, rbx, times_tagged_size,
- FixedArray::kHeaderSize - kTaggedSize));
- __ movq(Operand(rsp, rax, times_system_pointer_size, 0), r12);
- __ leal(rax, Operand(rax, 1));
- __ decl(rbx);
- __ j(greater, &loop);
- }
-
- // Adjust effective number of arguments (rax contains the number of
- // arguments from the call plus return address plus the number of
- // [[BoundArguments]]), so we need to subtract one for the return address.
- __ decl(rax);
-#endif // !V8_REVERSE_JSARGS
}
__ bind(&no_bound_arguments);
}
@@ -3339,6 +2987,27 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ EnterFrame(StackFrame::JS_TO_WASM);
// -------------------------------------------
+ // Compute offsets and prepare for GC.
+ // -------------------------------------------
+ // We will have to save a value indicating to the GC the number of values on
+ // the top of the stack that have to be scanned before calling the Wasm
+ // function.
+ constexpr int kFrameMarkerOffset = -kSystemPointerSize;
+ constexpr int kGCScanSlotCountOffset =
+ kFrameMarkerOffset - kSystemPointerSize;
+ constexpr int kParamCountOffset = kGCScanSlotCountOffset - kSystemPointerSize;
+ constexpr int kReturnCountOffset = kParamCountOffset - kSystemPointerSize;
+ constexpr int kValueTypesArrayStartOffset =
+ kReturnCountOffset - kSystemPointerSize;
+ // We set and use this slot only when moving parameters into the parameter
+ // registers (so no GC scan is needed).
+ constexpr int kFunctionDataOffset =
+ kValueTypesArrayStartOffset - kSystemPointerSize;
+ constexpr int kLastSpillOffset = kFunctionDataOffset;
+ constexpr int kNumSpillSlots = 5;
+ __ subq(rsp, Immediate(kNumSpillSlots * kSystemPointerSize));
+
+ // -------------------------------------------
// Load the Wasm exported function data and the Wasm instance.
// -------------------------------------------
Register closure = rdi;
@@ -3363,6 +3032,25 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag));
// -------------------------------------------
+ // Increment the call count in function data.
+ // -------------------------------------------
+ __ SmiAddConstant(
+ MemOperand(function_data,
+ WasmExportedFunctionData::kCallCountOffset - kHeapObjectTag),
+ Smi::FromInt(1));
+
+ // -------------------------------------------
+ // Check if the call count reached the threshold.
+ // -------------------------------------------
+ Label compile_wrapper, compile_wrapper_done;
+ __ SmiCompare(
+ MemOperand(function_data,
+ WasmExportedFunctionData::kCallCountOffset - kHeapObjectTag),
+ Smi::FromInt(wasm::kGenericWrapperThreshold));
+ __ j(greater_equal, &compile_wrapper);
+ __ bind(&compile_wrapper_done);
+
+ // -------------------------------------------
// Load values from the signature.
// -------------------------------------------
Register foreign_signature = r11;
@@ -3371,9 +3059,10 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
MemOperand(function_data,
WasmExportedFunctionData::kSignatureOffset - kHeapObjectTag));
Register signature = foreign_signature;
- __ movq(signature,
- MemOperand(foreign_signature, wasm::ObjectAccess::ToTagged(
- Foreign::kForeignAddressOffset)));
+ __ LoadExternalPointerField(
+ signature,
+ FieldOperand(foreign_signature, Foreign::kForeignAddressOffset),
+ kForeignForeignAddressTag);
foreign_signature = no_reg;
Register return_count = r8;
__ movq(return_count,
@@ -3387,29 +3076,12 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
signature = no_reg;
// -------------------------------------------
- // Set up the stack.
+ // Store signature-related values to the stack.
// -------------------------------------------
// We store values on the stack to restore them after function calls.
// We cannot push values onto the stack right before the wasm call. The wasm
// function expects the parameters, that didn't fit into the registers, on the
// top of the stack.
- // We will have to save a value indicating the GC the number
- // of values on the top of the stack that have to be scanned before calling
- // the Wasm function.
- constexpr int kFrameMarkerOffset = -kSystemPointerSize;
- constexpr int kGCScanSlotCountOffset =
- kFrameMarkerOffset - kSystemPointerSize;
- constexpr int kParamCountOffset = kGCScanSlotCountOffset - kSystemPointerSize;
- constexpr int kReturnCountOffset = kParamCountOffset - kSystemPointerSize;
- constexpr int kValueTypesArrayStartOffset =
- kReturnCountOffset - kSystemPointerSize;
- // We set and use this slot only when moving parameters into the parameter
- // registers (so no GC scan is needed).
- constexpr int kFunctionDataOffset =
- kValueTypesArrayStartOffset - kSystemPointerSize;
- constexpr int kLastSpillOffset = kFunctionDataOffset;
- constexpr int kNumSpillSlots = 5;
- __ subq(rsp, Immediate(kNumSpillSlots * kSystemPointerSize));
__ movq(MemOperand(rbp, kParamCountOffset), param_count);
__ movq(MemOperand(rbp, kReturnCountOffset), return_count);
__ movq(MemOperand(rbp, kValueTypesArrayStartOffset), valuetypes_array_ptr);
@@ -3418,17 +3090,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// Parameter handling.
// -------------------------------------------
Label prepare_for_wasm_call;
- __ cmpl(param_count, Immediate(0));
+ __ Cmp(param_count, 0);
// IF we have 0 params: jump through parameter handling.
__ j(equal, &prepare_for_wasm_call);
- // ELSE:
- // Make sure we have the same number of arguments in order to be able to load
- // the arguments using static offsets below.
- __ cmpl(kJavaScriptCallArgCountRegister, param_count);
- __ Check(equal, AbortReason::kInvalidNumberOfJsArgs);
-
// -------------------------------------------
// Create 2 sections for integer and float params.
// -------------------------------------------
@@ -3514,7 +3180,6 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
Register current_param = rbx;
Register param_limit = rdx;
-#ifdef V8_REVERSE_JSARGS
constexpr int kReceiverOnStackSize = kSystemPointerSize;
__ movq(current_param,
Immediate(kFPOnStackSize + kPCOnStackSize + kReceiverOnStackSize));
@@ -3523,13 +3188,6 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ addq(param_limit,
Immediate(kFPOnStackSize + kPCOnStackSize + kReceiverOnStackSize));
const int increment = kSystemPointerSize;
-#else
- __ movq(current_param, param_count);
- __ shlq(current_param, Immediate(kSystemPointerSizeLog2));
- __ addq(current_param, Immediate(kFPOnStackSize));
- __ movq(param_limit, Immediate(kFPOnStackSize));
- const int increment = -kSystemPointerSize;
-#endif
Register param = rax;
// We have to check the types of the params. The ValueType array contains
// first the return then the param types.
@@ -3981,6 +3639,30 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ Call(BUILTIN_CODE(masm->isolate(), WasmFloat64ToNumber),
RelocInfo::CODE_TARGET);
__ jmp(&return_done);
+
+ // -------------------------------------------
+ // Kick off compilation.
+ // -------------------------------------------
+ __ bind(&compile_wrapper);
+ // Enable GC.
+ MemOperand GCScanSlotPlace = MemOperand(rbp, kGCScanSlotCountOffset);
+ __ movq(GCScanSlotPlace, Immediate(4));
+ // Save registers to the stack.
+ __ pushq(wasm_instance);
+ __ pushq(function_data);
+ // Push the arguments for the runtime call.
+ __ Push(wasm_instance); // first argument
+ __ Push(function_data); // second argument
+ // Set up context.
+ __ Move(kContextRegister, Smi::zero());
+ // Call the runtime function that kicks off compilation.
+ __ CallRuntime(Runtime::kWasmCompileWrapper, 2);
+ // Pop the result.
+ __ movq(r9, kReturnRegister0);
+ // Restore registers from the stack.
+ __ popq(function_data);
+ __ popq(wasm_instance);
+ __ jmp(&compile_wrapper_done);
}
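
For orientation, the call-count bookkeeping and the compile_wrapper stub added to this wrapper implement a simple counter-based tier-up: each call through the generic wrapper bumps a per-function counter, and once it reaches wasm::kGenericWrapperThreshold the runtime is asked to compile a specific wrapper. A minimal C++ model of that control flow follows; the struct, helper names and threshold value are illustrative stand-ins, not the real V8 types.

    #include <cstdio>

    constexpr int kGenericWrapperThreshold = 6;  // assumed value, for illustration only

    struct FunctionData {            // stand-in for WasmExportedFunctionData
      int call_count = 0;            // kCallCountOffset
      bool has_specific_wrapper = false;
    };

    void WasmCompileWrapper(FunctionData& data) {  // models Runtime::kWasmCompileWrapper
      data.has_specific_wrapper = true;
    }

    void CallViaGenericWrapper(FunctionData& data) {
      data.call_count += 1;                               // SmiAddConstant(..., 1)
      if (data.call_count >= kGenericWrapperThreshold) {  // SmiCompare + j(greater_equal)
        WasmCompileWrapper(data);                         // the &compile_wrapper block
      }
      // &compile_wrapper_done: fall through to the generic argument conversion.
    }

    int main() {
      FunctionData f;
      for (int i = 0; i < 8; ++i) CallViaGenericWrapper(f);
      std::printf("calls=%d, specific wrapper compiled=%s\n", f.call_count,
                  f.has_specific_wrapper ? "yes" : "no");
      return 0;
    }
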
namespace {
@@ -4156,12 +3838,12 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- rbx : call data
// -- rdi : holder
// -- rsp[0] : return address
- // -- rsp[8] : argument argc
+ // -- rsp[8] : argument 0 (receiver)
+ // -- rsp[16] : argument 1
// -- ...
- // -- rsp[argc * 8] : argument 1
- // -- rsp[(argc + 1) * 8] : argument 0 (receiver)
+ // -- rsp[argc * 8] : argument (argc - 1)
+ // -- rsp[(argc + 1) * 8] : argument argc
// -----------------------------------
- // NOTE: The order of args are reversed if V8_REVERSE_JSARGS
Register api_function_address = rdx;
Register argc = rcx;
@@ -4220,13 +3902,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ leaq(scratch,
Operand(scratch, (FCA::kArgsLength + 1) * kSystemPointerSize));
-#else
- __ leaq(scratch, Operand(scratch, argc, times_system_pointer_size,
- (FCA::kArgsLength - 1) * kSystemPointerSize));
-#endif
__ movq(StackSpaceOperand(1), scratch);
// FunctionCallbackInfo::length_.
@@ -4336,7 +4013,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
__ LoadExternalPointerField(
api_function_address,
- FieldOperand(scratch, Foreign::kForeignAddressOffset));
+ FieldOperand(scratch, Foreign::kForeignAddressOffset),
+ kForeignForeignAddressTag);
// +3 is to skip prolog, return address and name handle.
Operand return_value_operand(
@@ -4352,6 +4030,223 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ int3(); // Unused on this architecture.
}
+namespace {
+
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Save all double registers, they will later be copied to the deoptimizer's
+ // FrameDescription.
+ static constexpr int kDoubleRegsSize =
+ kDoubleSize * XMMRegister::kNumRegisters;
+ __ AllocateStackSpace(kDoubleRegsSize);
+
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ Movsd(Operand(rsp, offset), xmm_reg);
+ }
+
+ // Save all general purpose registers, they will later be copied to the
+ // deoptimizer's FrameDescription.
+ static constexpr int kNumberOfRegisters = Register::kNumRegisters;
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ __ pushq(Register::from_code(i));
+ }
+
+ static constexpr int kSavedRegistersAreaSize =
+ kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
+ static constexpr int kCurrentOffsetToReturnAddress = kSavedRegistersAreaSize;
+ static constexpr int kCurrentOffsetToParentSP =
+ kCurrentOffsetToReturnAddress + kPCOnStackSize;
+
+ __ Store(
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate),
+ rbp);
+
+ // We use this to keep the value of the fifth argument temporarily.
+ // Unfortunately we can't store it directly in r8 (used for passing
+ // this on linux), since it is another parameter passing register on windows.
+ Register arg5 = r11;
+
+ __ movq(arg_reg_3, Immediate(Deoptimizer::kFixedExitSizeMarker));
+ // Get the address of the location in the code object
+ // and compute the fp-to-sp delta in register arg5.
+ __ movq(arg_reg_4, Operand(rsp, kCurrentOffsetToReturnAddress));
+ // Load the fp-to-sp-delta.
+ __ leaq(arg5, Operand(rsp, kCurrentOffsetToParentSP));
+ __ subq(arg5, rbp);
+ __ negq(arg5);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6);
+ __ movq(rax, Immediate(0));
+ Label context_check;
+ __ movq(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(rdi, &context_check);
+ __ movq(rax, Operand(rbp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ movq(arg_reg_1, rax);
+ __ Set(arg_reg_2, static_cast<int>(deopt_kind));
+ // Args 3 and 4 are already in the right registers.
+
+ // On windows put the arguments on the stack (PrepareCallCFunction
+ // has created space for this). On linux pass the arguments in r8 and r9.
+#ifdef V8_TARGET_OS_WIN
+ __ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
+ __ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
+ __ movq(Operand(rsp, 5 * kSystemPointerSize), arg5);
+#else
+ __ movq(r8, arg5);
+ __ LoadAddress(r9, ExternalReference::isolate_address(isolate));
+#endif
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+ // Preserve deoptimizer object in register rax and get the input
+ // frame descriptor pointer.
+ __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
+
+ // Fill in the input registers.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ PopQuad(Operand(rbx, offset));
+ }
+
+ // Fill in the double input registers.
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ __ popq(Operand(rbx, dst_offset));
+ }
+
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ __ movb(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate)),
+ Immediate(0));
+
+ // Remove the return address from the stack.
+ __ addq(rsp, Immediate(kPCOnStackSize));
+
+ // Compute a pointer to the unwinding limit in register rcx; that is
+ // the first stack slot not part of the input frame.
+ __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ addq(rcx, rsp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ leaq(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ Label pop_loop_header;
+ __ jmp(&pop_loop_header);
+ Label pop_loop;
+ __ bind(&pop_loop);
+ __ Pop(Operand(rdx, 0));
+ __ addq(rdx, Immediate(sizeof(intptr_t)));
+ __ bind(&pop_loop_header);
+ __ cmpq(rcx, rsp);
+ __ j(not_equal, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ pushq(rax);
+ __ PrepareCallCFunction(2);
+ __ movq(arg_reg_1, rax);
+ __ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate));
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 2);
+ }
+ __ popq(rax);
+
+ __ movq(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: rax = current FrameDescription**, rdx = one past the
+ // last FrameDescription**.
+ __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
+ __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
+ __ leaq(rdx, Operand(rax, rdx, times_system_pointer_size, 0));
+ __ jmp(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
+ __ movq(rbx, Operand(rax, 0));
+ __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ subq(rcx, Immediate(sizeof(intptr_t)));
+ __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+ __ bind(&inner_loop_header);
+ __ testq(rcx, rcx);
+ __ j(not_zero, &inner_push_loop);
+ __ addq(rax, Immediate(kSystemPointerSize));
+ __ bind(&outer_loop_header);
+ __ cmpq(rax, rdx);
+ __ j(below, &outer_push_loop);
+
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ Movsd(xmm_reg, Operand(rbx, src_offset));
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ PushQuad(Operand(rbx, FrameDescription::pc_offset()));
+ __ PushQuad(Operand(rbx, FrameDescription::continuation_offset()));
+
+ // Push the registers from the last output frame.
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ PushQuad(Operand(rbx, offset));
+ }
+
+ // Restore the registers from the stack.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ Register r = Register::from_code(i);
+ // Do not restore rsp, simply pop the value into the next register
+ // and overwrite this afterwards.
+ if (r == rsp) {
+ DCHECK_GT(i, 0);
+ r = Register::from_code(i - 1);
+ }
+ __ popq(r);
+ }
+
+ __ movb(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate)),
+ Immediate(1));
+
+ // Return to the continuation point.
+ __ ret(0);
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
#undef __
} // namespace internal
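
The deoptimization entries added above snapshot the general-purpose and XMM registers into the input FrameDescription, call out to Deoptimizer::New and ComputeOutputFrames, and then rebuild the stack from the computed output frames. A rough C++ model of the final frame-replacement loops, with FrameDescription reduced to an illustrative stand-in rather than the real class:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct FrameDescription {            // simplified stand-in, not the real class
      std::vector<intptr_t> contents;    // frame_content area (frame_size bytes)
      intptr_t pc = 0;
      intptr_t continuation = 0;
    };

    // Models the outer loop over output_[0..output_count) and the inner loop
    // that pushes each frame's contents from the highest slot down to slot 0.
    void PushOutputFrames(const std::vector<FrameDescription>& output,
                          std::vector<intptr_t>& stack) {
      for (const FrameDescription& frame : output) {        // rax walks up to rdx
        for (size_t i = frame.contents.size(); i-- > 0;) {  // rcx counts down to 0
          stack.push_back(frame.contents[i]);
        }
      }
      // The pc and continuation of the last output frame follow, and finally its
      // register values, which the epilogue pops back into the CPU registers.
      stack.push_back(output.back().pc);
      stack.push_back(output.back().continuation);
    }
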
diff --git a/deps/v8/src/codegen/DIR_METADATA b/deps/v8/src/codegen/DIR_METADATA
new file mode 100644
index 0000000000..fc018666b1
--- /dev/null
+++ b/deps/v8/src/codegen/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Compiler"
+}
\ No newline at end of file
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index 7b3ad8d1e0..332c1705b5 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -17,5 +17,3 @@ solanes@chromium.org
tebbi@chromium.org
titzer@chromium.org
mythria@chromium.org
-
-# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 00d0644f73..cc5d6299f5 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -552,6 +552,15 @@ Assembler::~Assembler() { DCHECK_EQ(const_pool_blocked_nesting_, 0); }
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyways.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
// Emit constant pool if necessary.
CheckConstPool(true, false);
DCHECK(pending_32_bit_constants_.empty());
@@ -2649,12 +2658,30 @@ static bool FitsVmovIntImm(uint64_t imm, uint32_t* encoding, uint8_t* cmode) {
return false;
}
+void Assembler::vmov(const DwVfpRegister dst, uint64_t imm) {
+ uint32_t enc;
+ uint8_t cmode;
+ uint8_t op = 0;
+ if (CpuFeatures::IsSupported(NEON) && FitsVmovIntImm(imm, &enc, &cmode)) {
+ CpuFeatureScope scope(this, NEON);
+ // Instruction details available in ARM DDI 0406C.b, A8-937.
+ // 001i1(27-23) | D(22) | 000(21-19) | imm3(18-16) | Vd(15-12) | cmode(11-8)
+ // | 0(7) | 0(6) | op(5) | 4(1) | imm4(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ emit(kSpecialCondition | 0x05 * B23 | d * B22 | vd * B12 | cmode * B8 |
+ op * B5 | 0x1 * B4 | enc);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
void Assembler::vmov(const QwNeonRegister dst, uint64_t imm) {
uint32_t enc;
uint8_t cmode;
uint8_t op = 0;
- if (CpuFeatures::IsSupported(VFPv3) && FitsVmovIntImm(imm, &enc, &cmode)) {
- CpuFeatureScope scope(this, VFPv3);
+ if (CpuFeatures::IsSupported(NEON) && FitsVmovIntImm(imm, &enc, &cmode)) {
+ CpuFeatureScope scope(this, NEON);
// Instruction details available in ARM DDI 0406C.b, A8-937.
// 001i1(27-23) | D(22) | 000(21-19) | imm3(18-16) | Vd(15-12) | cmode(11-8)
// | 0(7) | Q(6) | op(5) | 4(1) | imm4(3-0)
@@ -3677,6 +3704,28 @@ void Assembler::vld1(NeonSize size, const NeonListOperand& dst,
src.rm().code());
}
+// vld1s(ingle element to one lane).
+void Assembler::vld1s(NeonSize size, const NeonListOperand& dst, uint8_t index,
+ const NeonMemOperand& src) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.322.
+ // 1111(31-28) | 01001(27-23) | D(22) | 10(21-20) | Rn(19-16) |
+ // Vd(15-12) | size(11-10) | index_align(7-4) | Rm(3-0)
+ // See vld1 (single element to all lanes) if size == 0x3, implemented as
+ // vld1r(eplicate).
+ DCHECK_NE(size, 0x3);
+ // Check for valid lane indices.
+ DCHECK_GT(1 << (3 - size), index);
+ // Specifying alignment not supported, use standard alignment.
+ uint8_t index_align = index << (size + 1);
+
+ DCHECK(IsEnabled(NEON));
+ int vd, d;
+ dst.base().split_code(&vd, &d);
+ emit(0xFU * B28 | 4 * B24 | 1 * B23 | d * B22 | 2 * B20 |
+ src.rn().code() * B16 | vd * B12 | size * B10 | index_align * B4 |
+ src.rm().code());
+}
+
// vld1r(eplicate)
void Assembler::vld1r(NeonSize size, const NeonListOperand& dst,
const NeonMemOperand& src) {
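
The index_align value in the new vld1s packs the lane index into the instruction's alignment field; a small sketch of that encoding and its validity check (function name and types are illustrative only):

    #include <cassert>
    #include <cstdint>

    // size: 0 = 8-bit, 1 = 16-bit, 2 = 32-bit elements (size 3 is vld1r instead).
    uint8_t Vld1sIndexAlign(int size, uint8_t index) {
      assert(size != 0x3);
      assert(index < (1u << (3 - size)));  // 8, 4 or 2 valid lanes respectively
      return static_cast<uint8_t>(index << (size + 1));  // standard alignment only
    }
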
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index 18631e2ece..cb8b7628f5 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -839,6 +839,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// All these APIs support D0 to D31 and Q0 to Q15.
void vld1(NeonSize size, const NeonListOperand& dst,
const NeonMemOperand& src);
+ // vld1s(ingle element to one lane).
+ void vld1s(NeonSize size, const NeonListOperand& dst, uint8_t index,
+ const NeonMemOperand& src);
void vld1r(NeonSize size, const NeonListOperand& dst,
const NeonMemOperand& src);
void vst1(NeonSize size, const NeonListOperand& src,
@@ -853,6 +856,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src);
void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index);
+ void vmov(DwVfpRegister dst, uint64_t imm);
void vmov(QwNeonRegister dst, uint64_t imm);
void vmov(QwNeonRegister dst, QwNeonRegister src);
void vdup(NeonSize size, QwNeonRegister dst, Register src);
diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
index 731d175393..96bf2ae50c 100644
--- a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
@@ -278,54 +278,6 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 3);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 319ee39ef7..b72e385d58 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -163,12 +163,10 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
+ bool target_is_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
- if (options().use_pc_relative_calls_and_jumps &&
- target_is_isolate_independent_builtin) {
+ if (options().use_pc_relative_calls_and_jumps && target_is_builtin) {
int32_t code_target_index = AddCodeTarget(code);
b(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
return;
@@ -178,13 +176,12 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
// size s.t. pc-relative calls may be used.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- int offset = code->builtin_index() * kSystemPointerSize +
- IsolateData::builtin_entry_table_offset();
+ int offset = IsolateData::builtin_entry_slot_offset(
+ static_cast<Builtins::Name>(code->builtin_index()));
ldr(scratch, MemOperand(kRootRegister, offset));
Jump(scratch, cond);
return;
- } else if (options().inline_offheap_trampolines &&
- target_is_isolate_independent_builtin) {
+ } else if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
@@ -258,12 +255,10 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
+ bool target_is_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
- if (target_is_isolate_independent_builtin &&
- options().use_pc_relative_calls_and_jumps) {
+ if (target_is_builtin && options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
bl(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
return;
@@ -271,13 +266,12 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
// This branch is taken only for specific cctests, where we force isolate
// creation at runtime. At this point, Code space isn't restricted to a
// size s.t. pc-relative calls may be used.
- int offset = code->builtin_index() * kSystemPointerSize +
- IsolateData::builtin_entry_table_offset();
+ int offset = IsolateData::builtin_entry_slot_offset(
+ static_cast<Builtins::Name>(code->builtin_index()));
ldr(ip, MemOperand(kRootRegister, offset));
Call(ip, cond);
return;
- } else if (target_is_isolate_independent_builtin &&
- options().inline_offheap_trampolines) {
+ } else if (target_is_builtin && options().inline_offheap_trampolines) {
// Inline the trampoline.
CallBuiltin(builtin_index);
return;
@@ -1553,22 +1547,102 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
mov(sp, dst_reg);
}
+void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ CHECK(is_int32(offset));
+ ldr(destination, MemOperand(kRootRegister, offset));
+}
+
+void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ sub(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
+ b(le, stack_overflow); // Signed comparison.
+}
+
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeFlag flag) {
Label regular_invoke;
-
- // Check whether the expected and actual arguments count match. If not,
- // setup registers according to contract with ArgumentsAdaptorTrampoline:
// r0: actual arguments count
// r1: function (passed through to callee)
// r2: expected arguments count
-
- // The code below is made a lot easier because the calling code already sets
- // up actual and expected registers according to the contract.
DCHECK_EQ(actual_parameter_count, r0);
DCHECK_EQ(expected_parameter_count, r2);
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // If the expected parameter count is equal to the adaptor sentinel, no need
+ // to push undefined value as arguments.
+ cmp(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
+ b(eq, &regular_invoke);
+
+ // If overapplication or if the actual argument count is equal to the
+ // formal parameter count, no need to push extra undefined values.
+ sub(expected_parameter_count, expected_parameter_count,
+ actual_parameter_count, SetCC);
+ b(le, &regular_invoke);
+
+ Label stack_overflow;
+ Register scratch = r4;
+ StackOverflowCheck(expected_parameter_count, scratch, &stack_overflow);
+
+ // Underapplication. Move the arguments already in the stack, including the
+ // receiver and the return address.
+ {
+ Label copy, check;
+ Register num = r5, src = r6, dest = r9; // r7 and r8 are context and root.
+ mov(src, sp);
+ // Update stack pointer.
+ lsl(scratch, expected_parameter_count, Operand(kSystemPointerSizeLog2));
+ AllocateStackSpace(scratch);
+ mov(dest, sp);
+ mov(num, actual_parameter_count);
+ b(&check);
+ bind(&copy);
+ ldr(scratch, MemOperand(src, kSystemPointerSize, PostIndex));
+ str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
+ sub(num, num, Operand(1), SetCC);
+ bind(&check);
+ b(ge, &copy);
+ }
+
+ // Fill remaining expected arguments with undefined values.
+ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ {
+ Label loop;
+ bind(&loop);
+ str(scratch, MemOperand(r9, kSystemPointerSize, PostIndex));
+ sub(expected_parameter_count, expected_parameter_count, Operand(1), SetCC);
+ b(gt, &loop);
+ }
+ b(&regular_invoke);
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ bkpt(0);
+ }
+#else
+ // Check whether the expected and actual arguments count match. If not,
+ // setup registers according to contract with ArgumentsAdaptorTrampoline.
cmp(expected_parameter_count, actual_parameter_count);
b(eq, &regular_invoke);
@@ -1579,7 +1653,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
}
- bind(&regular_invoke);
+#endif
+ bind(&regular_invoke);
}
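
Taken together, the V8_NO_ARGUMENTS_ADAPTOR branch added to InvokePrologue replaces the adaptor frame with in-place stack surgery: when the callee does not care (sentinel) or enough arguments were passed, it invokes directly; otherwise it grows the stack, copies the receiver and actual arguments down, and pads the missing formals with undefined. A compact model of that decision, with the sentinel value and value type as stand-ins:

    #include <cstddef>
    #include <vector>

    constexpr size_t kDontAdaptArgumentsSentinel = ~size_t{0};  // stand-in value

    struct Value {
      bool is_undefined = false;
    };

    // stack_args holds the receiver followed by the actual arguments.
    void PrepareArgumentsForInvoke(std::vector<Value>& stack_args,
                                   size_t expected_parameter_count) {
      size_t actual_parameter_count = stack_args.size() - 1;
      if (expected_parameter_count == kDontAdaptArgumentsSentinel ||
          actual_parameter_count >= expected_parameter_count) {
        return;  // &regular_invoke: call the function as-is
      }
      // Under-application: pad the missing formal parameters with undefined.
      for (size_t i = actual_parameter_count; i < expected_parameter_count; ++i) {
        stack_args.push_back(Value{true});
      }
    }
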
void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
@@ -2140,6 +2215,23 @@ void TurboAssembler::RestoreFPRegs(Register location, Register scratch) {
add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
}
+void TurboAssembler::SaveFPRegsToHeap(Register location, Register scratch) {
+ CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
+ CheckFor32DRegs(scratch);
+ vstm(ia_w, location, d0, d15);
+ vstm(ia_w, location, d16, d31, ne);
+ add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
+}
+
+void TurboAssembler::RestoreFPRegsFromHeap(Register location,
+ Register scratch) {
+ CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
+ CheckFor32DRegs(scratch);
+ vldm(ia_w, location, d0, d15);
+ vldm(ia_w, location, d16, d31, ne);
+ add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
+}
+
template <typename T>
void TurboAssembler::FloatMaxHelper(T result, T left, T right,
Label* out_of_line) {
@@ -2280,16 +2372,18 @@ void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
}
static const int kRegisterPassedArguments = 4;
+// The hardfloat calling convention passes double arguments in registers d0-d7.
+static const int kDoubleRegisterPassedArguments = 8;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
if (use_eabi_hardfloat()) {
- // In the hard floating point calling convention, we can use all double
+ // In the hard floating point calling convention, we can use the first 8
// registers to pass doubles.
- if (num_double_arguments > DoubleRegister::SupportedRegisterCount()) {
+ if (num_double_arguments > kDoubleRegisterPassedArguments) {
stack_passed_words +=
- 2 * (num_double_arguments - DoubleRegister::SupportedRegisterCount());
+ 2 * (num_double_arguments - kDoubleRegisterPassedArguments);
}
} else {
// In the soft floating point calling convention, every double
@@ -2491,26 +2585,18 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label*) {
+ BlockConstPoolScope block_const_pool(this);
+ ldr(ip, MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(target)));
+ Call(ip);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
- NoRootArrayScope no_root_array(this);
-
- // Save the deopt id in r10 (we don't need the roots array from now on).
- DCHECK_LE(deopt_id, 0xFFFF);
- if (CpuFeatures::IsSupported(ARMv7)) {
- // On ARMv7, we can use movw (with a maximum immediate of 0xFFFF)
- movw(r10, deopt_id);
- } else {
- // On ARMv6, we might need two instructions.
- mov(r10, Operand(deopt_id & 0xFF)); // Set the low byte.
- if (deopt_id >= 0xFF) {
- orr(r10, r10, Operand(deopt_id & 0xFF00)); // Set the high byte.
- }
- }
-
- Call(target, RelocInfo::RUNTIME_ENTRY);
- CheckConstPool(false, false);
}
void TurboAssembler::Trap() { stop(); }
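
The LoadStackLimit and StackOverflowCheck helpers added to this file boil down to one signed comparison: the space between sp and the real JS stack limit must exceed what the pending arguments will occupy. A minimal C++ model of that predicate; the addresses are illustrative, and on the real target the limit comes from ExternalReference::address_of_real_jslimit:

    #include <cstdint>

    // True when pushing num_args pointer-sized values would overflow the stack.
    bool WouldOverflowStack(uintptr_t sp, uintptr_t real_stack_limit,
                            uintptr_t num_args) {
      constexpr unsigned kPointerSizeLog2 = 2;  // 32-bit ARM
      // The difference may be "negative" when the stack is already blown, hence
      // the signed comparison (b(le, stack_overflow) in the assembly).
      intptr_t space_left = static_cast<intptr_t>(sp - real_stack_limit);
      return space_left <= static_cast<intptr_t>(num_args << kPointerSizeLog2);
    }
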
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index a7dc5498b8..a4d6632a07 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -17,6 +17,10 @@
namespace v8 {
namespace internal {
+// TODO(victorgomes): Move definition to macro-assembler.h, once all other
+// platforms are updated.
+enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
// ----------------------------------------------------------------------------
// Static helper functions
@@ -320,10 +324,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- // This should only be used when assembling a deoptimizer call because of
- // the CheckConstPool invocation, which is only needed for deoptimization.
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -395,6 +398,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// values to location, restoring [d0..(d15|d31)].
void RestoreFPRegs(Register location, Register scratch);
+ // As above, but with heap semantics instead of stack semantics, i.e.: the
+ // location starts at the lowest address and grows towards higher addresses,
+ // for both saves and restores.
+ void SaveFPRegsToHeap(Register location, Register scratch);
+ void RestoreFPRegsFromHeap(Register location, Register scratch);
+
// Calculate how much stack space (in bytes) are required to store caller
// registers excluding those specified in the arguments.
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
@@ -731,11 +740,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// TODO(victorgomes): Remove this function once we stick with the reversed
// arguments order.
MemOperand ReceiverOperand(Register argc) {
-#ifdef V8_REVERSE_JSARGS
return MemOperand(sp, 0);
-#else
- return MemOperand(sp, argc, LSL, kSystemPointerSizeLog2);
-#endif
}
// ---------------------------------------------------------------------------
@@ -781,6 +786,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register scratch2);
// ---------------------------------------------------------------------------
+ // Stack limit utilities
+ void LoadStackLimit(Register destination, StackLimitKind kind);
+ void StackOverflowCheck(Register num_args, Register scratch,
+ Label* stack_overflow);
+
+ // ---------------------------------------------------------------------------
// Smi utilities
void SmiTag(Register reg, SBit s = LeaveCC);
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index 2e21ab913d..4aaa413d2d 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -372,6 +372,15 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyways.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
// Emit constant pool if necessary.
ForceConstantPoolEmissionWithoutJump();
DCHECK(constpool_.IsEmpty());
@@ -403,7 +412,9 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
}
void Assembler::Align(int m) {
- DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
+ // If not, the loop below won't terminate.
+ DCHECK(IsAligned(pc_offset(), kInstrSize));
+ DCHECK(m >= kInstrSize && base::bits::IsPowerOfTwo(m));
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -3199,9 +3210,11 @@ void Assembler::movi(const VRegister& vd, const uint64_t imm, Shift shift,
Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
ImmNEONabcdefgh(imm8) | NEONCmode(0xE) | Rd(vd));
} else if (shift == LSL) {
+ DCHECK(is_uint8(imm));
NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount,
NEONModifiedImmediate_MOVI);
} else {
+ DCHECK(is_uint8(imm));
NEONModifiedImmShiftMsl(vd, static_cast<int>(imm), shift_amount,
NEONModifiedImmediate_MOVI);
}
diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
index 0c9beba776..f7bccfdbe2 100644
--- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
@@ -282,54 +282,6 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 3);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index fef1758aaa..69242484bc 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -503,13 +503,15 @@ void TurboAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift,
}
void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {
- // TODO(all): Move 128-bit values in a more efficient way.
+ // TODO(v8:11033): Move 128-bit values in a more efficient way.
DCHECK(vd.Is128Bits());
- UseScratchRegisterScope temps(this);
Movi(vd.V2D(), lo);
- Register temp = temps.AcquireX();
- Mov(temp, hi);
- Ins(vd.V2D(), 1, temp);
+ if (lo != hi) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, hi);
+ Ins(vd.V2D(), 1, temp);
+ }
}
void TurboAssembler::Mvn(const Register& rd, const Operand& operand) {
@@ -869,7 +871,7 @@ bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch(
unresolved_branches_.insert(std::pair<int, FarBranchInfo>(
max_reachable_pc, FarBranchInfo(pc_offset(), label)));
// Also maintain the next pool check.
- next_veneer_pool_check_ = Min(
+ next_veneer_pool_check_ = std::min(
next_veneer_pool_check_, max_reachable_pc - kVeneerDistanceCheckMargin);
}
return need_longer_range;
@@ -1778,8 +1780,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
@@ -1833,8 +1834,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index)) {
// Inline the trampoline.
CallBuiltin(builtin_index);
return;
@@ -1880,6 +1880,13 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
}
}
+void TurboAssembler::LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
+ Register destination) {
+ Ldr(destination,
+ MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(builtin_index)));
+}
+
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
@@ -2005,15 +2012,11 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
return is_int26(offset);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(
+ Builtins::Name target, int deopt_id, Label* exit, DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label) {
BlockPoolsScope scope(this);
- int64_t offset = static_cast<int64_t>(target) -
- static_cast<int64_t>(options().code_range_start);
- DCHECK_EQ(offset % kInstrSize, 0);
- offset = offset / static_cast<int>(kInstrSize);
- DCHECK(IsNearCallOffset(offset));
- near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
+ bl(jump_deoptimization_entry_label);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
@@ -2076,23 +2079,148 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
Mov(sp, dst_reg);
}
-void MacroAssembler::InvokePrologue(Register expected_parameter_count,
- Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
- Label regular_invoke;
+void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
- // Check whether the expected and actual arguments count match. The registers
- // are set up according to contract with ArgumentsAdaptorTrampoline:
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ Ldr(destination, MemOperand(kRootRegister, offset));
+}
+
+void MacroAssembler::StackOverflowCheck(Register num_args,
+ Label* stack_overflow) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+
+ // Check the stack for overflow.
+ // We are not trying to catch interruptions (e.g. debug break and
+ // preemption) here, so the "real stack limit" is checked.
+
+ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here, which will cause scratch to become negative.
+ Sub(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ Cmp(scratch, Operand(num_args, LSL, kSystemPointerSizeLog2));
+ B(le, stack_overflow);
+}
+
+void MacroAssembler::InvokePrologue(Register formal_parameter_count,
+ Register actual_argument_count, Label* done,
+ InvokeFlag flag) {
// x0: actual arguments count.
// x1: function (passed through to callee).
// x2: expected arguments count.
- // The code below is made a lot easier because the calling code already sets
- // up actual and expected registers according to the contract.
- DCHECK_EQ(actual_parameter_count, x0);
- DCHECK_EQ(expected_parameter_count, x2);
+ // x3: new target
+ Label regular_invoke;
+ DCHECK_EQ(actual_argument_count, x0);
+ DCHECK_EQ(formal_parameter_count, x2);
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // If the formal parameter count is equal to the adaptor sentinel, there is
+ // no need to push undefined values as arguments.
+ Cmp(formal_parameter_count, Operand(kDontAdaptArgumentsSentinel));
+ B(eq, &regular_invoke);
+
+ // If overapplication or if the actual argument count is equal to the
+ // formal parameter count, no need to push extra undefined values.
+ Register extra_argument_count = x2;
+ Subs(extra_argument_count, formal_parameter_count, actual_argument_count);
+ B(le, &regular_invoke);
+
+ // The stack pointer in arm64 needs to be 16-byte aligned. We might need to
+ // (1) add an extra padding slot or (2) remove (re-use) the padding slot
+ // already in the stack. Let {slots_to_copy} be the number of slots
+ // (arguments) to move up in the stack and let {slots_to_claim} be the
+ // number of extra stack slots to claim.
+ Label even_extra_count, skip_move;
+ Register slots_to_copy = x4;
+ Register slots_to_claim = x5;
+
+ Add(slots_to_copy, actual_argument_count, 1); // Copy with receiver.
+ Mov(slots_to_claim, extra_argument_count);
+ Tbz(extra_argument_count, 0, &even_extra_count);
+
+ // Calculate {slots_to_claim} when {extra_argument_count} is odd.
+ // If {actual_argument_count} is even, we need one extra padding slot
+ // {slots_to_claim = extra_argument_count + 1}.
+ // If {actual_argument_count} is odd, we know that the
+ // original arguments will have a padding slot that we can reuse
+ // {slots_to_claim = extra_argument_count - 1}.
+ {
+ Register scratch = x11;
+ Add(slots_to_claim, extra_argument_count, 1);
+ And(scratch, actual_argument_count, 1);
+ Eor(scratch, scratch, 1);
+ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
+ }
+
+ Bind(&even_extra_count);
+ Cbz(slots_to_claim, &skip_move);
+
+ Label stack_overflow;
+ StackOverflowCheck(slots_to_claim, &stack_overflow);
+ Claim(slots_to_claim);
+ // Move the arguments already in the stack including the receiver.
+ {
+ Register src = x6;
+ Register dst = x7;
+ SlotAddress(src, slots_to_claim);
+ SlotAddress(dst, 0);
+ CopyDoubleWords(dst, src, slots_to_copy);
+ }
+
+ Bind(&skip_move);
+ Register actual_argument_with_receiver = x4;
+ Register pointer_next_value = x5;
+ Add(actual_argument_with_receiver, actual_argument_count,
+ 1); // {slots_to_copy} was scratched.
+
+ // Copy extra arguments as undefined values.
+ {
+ Label loop;
+ Register undefined_value = x6;
+ Register count = x7;
+ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+ SlotAddress(pointer_next_value, actual_argument_with_receiver);
+ Mov(count, extra_argument_count);
+ Bind(&loop);
+ Str(undefined_value,
+ MemOperand(pointer_next_value, kSystemPointerSize, PostIndex));
+ Subs(count, count, 1);
+ Cbnz(count, &loop);
+ }
+
+ // Set padding if needed.
+ {
+ Label skip;
+ Register total_args_slots = x4;
+ Add(total_args_slots, actual_argument_with_receiver, extra_argument_count);
+ Tbz(total_args_slots, 0, &skip);
+ Str(padreg, MemOperand(pointer_next_value));
+ Bind(&skip);
+ }
+ B(&regular_invoke);
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ Unreachable();
+ }
+#else
+ // Check whether the expected and actual arguments count match. The registers
+ // are set up according to contract with ArgumentsAdaptorTrampoline:
// If actual == expected perform a regular invocation.
- Cmp(expected_parameter_count, actual_parameter_count);
+ Cmp(formal_parameter_count, actual_argument_count);
B(eq, &regular_invoke);
// The argument counts mismatch, generate a call to the argument adaptor.
@@ -2105,6 +2233,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
}
+#endif
Bind(&regular_invoke);
}
@@ -2136,7 +2265,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register actual_parameter_count,
InvokeFlag flag) {
// You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
DCHECK_EQ(function, x1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == x3);
@@ -2186,11 +2315,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Operand MacroAssembler::ReceiverOperand(Register arg_count) {
-#ifdef V8_REVERSE_JSARGS
return Operand(0);
-#else
- return Operand(arg_count, LSL, kXRegSizeLog2);
-#endif
}
void MacroAssembler::InvokeFunctionWithNewTarget(
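For reference, the StackOverflowCheck helper added to macro-assembler-arm64.cc above boils down to a signed comparison between the remaining stack space and the bytes the incoming arguments will occupy. A minimal standalone C++ sketch of that test, assuming 64-bit (8-byte) stack slots; the function and parameter names are illustrative, not V8 API:

#include <cstdint>

// Mirrors Sub(scratch, sp, limit); Cmp(scratch, num_args << 3); B(le, ...).
// The "le" condition is signed, so a stack that has already overflowed
// (sp below the limit, making "remaining" negative) also takes the
// overflow path.
bool WouldOverflowStack(uintptr_t sp, uintptr_t real_stack_limit,
                        uint64_t num_args) {
  constexpr int kSystemPointerSizeLog2 = 3;  // 8-byte slots on arm64
  const intptr_t remaining = static_cast<intptr_t>(sp) -
                             static_cast<intptr_t>(real_stack_limit);
  return remaining <=
         static_cast<intptr_t>(num_args << kSystemPointerSizeLog2);
}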
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 655c44f819..b453a17394 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -145,6 +145,10 @@ enum PreShiftImmMode {
kAnyShift // Allow any pre-shift.
};
+// TODO(victorgomes): Move definition to macro-assembler.h, once all other
+// platforms are updated.
+enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
@@ -968,6 +972,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
+ Register destination);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
@@ -980,8 +986,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
// Calls a C function.
// The called function is not allowed to trigger a
@@ -1781,8 +1788,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
- // TODO(victorgomes): inline this function once we remove V8_REVERSE_JSARGS
- // flag.
Operand ReceiverOperand(const Register arg_count);
// ---- SMI and Number Utilities ----
@@ -1979,6 +1984,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register scratch2);
// ---------------------------------------------------------------------------
+ // Stack limit utilities
+ void LoadStackLimit(Register destination, StackLimitKind kind);
+ void StackOverflowCheck(Register num_args, Label* stack_overflow);
+
+ // ---------------------------------------------------------------------------
// Garbage collector support (GC).
// Notify the garbage collector that we wrote a pointer into an object.
@@ -2100,7 +2110,7 @@ class UseScratchRegisterScope {
#endif
available_->Remove(list);
}
- void Include(const Register& reg1, const Register& reg2) {
+ void Include(const Register& reg1, const Register& reg2 = NoReg) {
CPURegList list(reg1, reg2);
Include(list);
}
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index 31620ae965..fbbb0a18da 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -524,8 +524,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index 3d0b7d28e4..f23dccb53e 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -76,6 +76,15 @@ AssemblerOptions AssemblerOptions::Default(Isolate* isolate) {
return options;
}
+AssemblerOptions AssemblerOptions::DefaultForOffHeapTrampoline(
+ Isolate* isolate) {
+ AssemblerOptions options = AssemblerOptions::Default(isolate);
+ // Off-heap trampolines may not contain any metadata since their metadata
+ // offsets refer to the off-heap metadata area.
+ options.emit_code_comments = false;
+ return options;
+}
+
namespace {
class DefaultAssemblerBuffer : public AssemblerBuffer {
@@ -255,7 +264,9 @@ Handle<HeapObject> AssemblerBase::GetEmbeddedObject(
int Assembler::WriteCodeComments() {
- if (!FLAG_code_comments || code_comments_writer_.entry_count() == 0) return 0;
+ CHECK_IMPLIES(code_comments_writer_.entry_count() > 0,
+ options().emit_code_comments);
+ if (code_comments_writer_.entry_count() == 0) return 0;
int offset = pc_offset();
code_comments_writer_.Emit(this);
int size = pc_offset() - offset;
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 6419e55cec..626bd04bc8 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -180,8 +180,11 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
// info. This is useful in some platform (Win64) where the unwind info depends
// on a function prologue/epilogue.
bool collect_win64_unwind_info = false;
+ // Whether to emit code comments.
+ bool emit_code_comments = FLAG_code_comments;
static AssemblerOptions Default(Isolate* isolate);
+ static AssemblerOptions DefaultForOffHeapTrampoline(Isolate* isolate);
};
class AssemblerBuffer {
@@ -226,6 +229,8 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
}
// Features are usually enabled by CpuFeatureScope, which also asserts that
// the features are supported before they are enabled.
+ // IMPORTANT: IsEnabled() should only be used by DCHECKs. For real feature
+ // detection, use IsSupported().
bool IsEnabled(CpuFeature f) {
return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
}
@@ -235,7 +240,9 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
bool is_constant_pool_available() const {
if (FLAG_enable_embedded_constant_pool) {
- return constant_pool_available_;
+ // We need to disable the constant pool here for embedded builtins
+ // because the metadata section is not adjacent to the instructions.
+ return constant_pool_available_ && !options().isolate_independent_code;
} else {
// Embedded constant pool not supported on this architecture.
UNREACHABLE();
@@ -280,7 +287,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// Record an inline code comment that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg) {
- if (FLAG_code_comments) {
+ if (options().emit_code_comments) {
code_comments_writer_.Add(pc_offset(), std::string(msg));
}
}
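Taken together, the assembler.cc and assembler.h hunks above route comment recording through the new emit_code_comments option instead of consulting FLAG_code_comments directly, and DefaultForOffHeapTrampoline turns that option off so no comment metadata can end up in an off-heap trampoline. A standalone model of the gating, using simplified stand-in types rather than the real V8 classes:

#include <string>
#include <vector>

struct Options {
  bool emit_code_comments = true;  // stands in for FLAG_code_comments
};

Options DefaultForOffHeapTrampoline() {
  Options options;
  options.emit_code_comments = false;  // trampolines carry no comment metadata
  return options;
}

struct Assembler {
  explicit Assembler(Options o) : options(o) {}
  void RecordComment(const std::string& msg) {
    // With the option off this is a no-op, so WriteCodeComments() later has
    // nothing to emit.
    if (options.emit_code_comments) comments.push_back(msg);
  }
  Options options;
  std::vector<std::string> comments;
};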
diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h
index e55e691a08..267beb55e3 100644
--- a/deps/v8/src/codegen/bailout-reason.h
+++ b/deps/v8/src/codegen/bailout-reason.h
@@ -30,7 +30,6 @@ namespace internal {
V(kInvalidJumpTableIndex, "Invalid jump table index") \
V(kInvalidParametersAndRegistersInGenerator, \
"invalid parameters and registers in generator") \
- V(kInvalidNumberOfJsArgs, "Invalid number of JS arguments") \
V(kMissingBytecodeArray, "Missing bytecode array from function") \
V(kObjectNotTagged, "The object is not tagged") \
V(kObjectTagged, "The object is tagged") \
@@ -58,6 +57,7 @@ namespace internal {
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
+ V(kUnexpectedAdditionalPopValue, "Unexpected additional pop value") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
diff --git a/deps/v8/src/codegen/code-desc.h b/deps/v8/src/codegen/code-desc.h
index 16941b074a..e051bb459c 100644
--- a/deps/v8/src/codegen/code-desc.h
+++ b/deps/v8/src/codegen/code-desc.h
@@ -16,11 +16,11 @@ namespace internal {
// the buffer and grows backward. Inlined metadata sections may exist
// at the end of the instructions.
//
-// │<--------------- buffer_size ----------------------------------->│
-// │<---------------- instr_size ------------->│ │<-reloc_size->│
-// ├───────────────────────────────────────────┼──────┼──────────────┤
-// │ instructions │ data │ free │ reloc info │
-// ├───────────────────────────────────────────┴──────┴──────────────┘
+// |<--------------- buffer_size ----------------------------------->|
+// |<---------------- instr_size ------------->| |<-reloc_size->|
+// |--------------+----------------------------+------+--------------|
+// | instructions | data | free | reloc info |
+// +--------------+----------------------------+------+--------------+
// TODO(jgruber): Add a single chokepoint for specifying the instruction area
// layout (i.e. the order of inlined metadata fields).
@@ -62,6 +62,24 @@ class CodeDesc {
int code_comments_offset = 0;
int code_comments_size = 0;
+ // TODO(jgruber,v8:11036): Remove these functions once CodeDesc fields have
+ // been made consistent with Code layout.
+ int body_size() const { return instr_size + unwinding_info_size; }
+ int instruction_size() const { return safepoint_table_offset; }
+ int metadata_size() const { return body_size() - instruction_size(); }
+ int safepoint_table_offset_relative() const {
+ return safepoint_table_offset - instruction_size();
+ }
+ int handler_table_offset_relative() const {
+ return handler_table_offset - instruction_size();
+ }
+ int constant_pool_offset_relative() const {
+ return constant_pool_offset - instruction_size();
+ }
+ int code_comments_offset_relative() const {
+ return code_comments_offset - instruction_size();
+ }
+
// Relocation info is located at the end of the buffer and not part of the
// instructions area.
@@ -69,10 +87,14 @@ class CodeDesc {
int reloc_size = 0;
// Unwinding information.
- // TODO(jgruber): Pack this into the inlined metadata section.
byte* unwinding_info = nullptr;
int unwinding_info_size = 0;
+ int unwinding_info_offset_relative() const {
+ // TODO(jgruber,v8:11036): Remove this function once unwinding_info setup
+ // is more consistent with other metadata tables.
+ return code_comments_offset_relative() + code_comments_size;
+ }
Assembler* origin = nullptr;
};
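The new CodeDesc accessors above simply rebase the absolute buffer offsets onto the start of the metadata area, which begins at the safepoint table. A worked example with invented numbers (not taken from any real build):

#include <cassert>

int main() {
  // Invented layout: 1000 bytes of instructions plus inlined metadata, with
  // the metadata tables starting at offset 800 and 32 bytes of unwind info.
  const int instr_size = 1000;
  const int safepoint_table_offset = 800;
  const int handler_table_offset = 880;
  const int code_comments_offset = 940;
  const int code_comments_size = 60;
  const int unwinding_info_size = 32;

  const int instruction_size = safepoint_table_offset;     // 800
  const int body_size = instr_size + unwinding_info_size;  // 1032
  const int metadata_size = body_size - instruction_size;  // 232
  (void)metadata_size;

  // The *_relative() helpers subtract instruction_size().
  assert(handler_table_offset - instruction_size == 80);
  assert(code_comments_offset - instruction_size == 140);
  // unwinding_info_offset_relative(): placed right after the code comments.
  assert(code_comments_offset - instruction_size + code_comments_size == 200);
  return 0;
}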
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 184a31c8a3..ca340c69c8 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -85,7 +85,7 @@ void CodeStubAssembler::Assert(const NodeGenerator<BoolT>& condition_body,
#endif
}
-void CodeStubAssembler::Assert(SloppyTNode<Word32T> condition_node,
+void CodeStubAssembler::Assert(TNode<Word32T> condition_node,
const char* message, const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes) {
#if defined(DEBUG)
@@ -129,7 +129,7 @@ void CodeStubAssembler::Check(const NodeGenerator<BoolT>& condition_body,
Check(branch, message, file, line, extra_nodes);
}
-void CodeStubAssembler::Check(SloppyTNode<Word32T> condition_node,
+void CodeStubAssembler::Check(TNode<Word32T> condition_node,
const char* message, const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes) {
BranchGenerator branch = [=](Label* ok, Label* not_ok) {
@@ -197,24 +197,26 @@ void CodeStubAssembler::FailAssert(
Unreachable();
}
-TNode<Int32T> CodeStubAssembler::SelectInt32Constant(
- SloppyTNode<BoolT> condition, int true_value, int false_value) {
+TNode<Int32T> CodeStubAssembler::SelectInt32Constant(TNode<BoolT> condition,
+ int true_value,
+ int false_value) {
return SelectConstant<Int32T>(condition, Int32Constant(true_value),
Int32Constant(false_value));
}
-TNode<IntPtrT> CodeStubAssembler::SelectIntPtrConstant(
- SloppyTNode<BoolT> condition, int true_value, int false_value) {
+TNode<IntPtrT> CodeStubAssembler::SelectIntPtrConstant(TNode<BoolT> condition,
+ int true_value,
+ int false_value) {
return SelectConstant<IntPtrT>(condition, IntPtrConstant(true_value),
IntPtrConstant(false_value));
}
TNode<Oddball> CodeStubAssembler::SelectBooleanConstant(
- SloppyTNode<BoolT> condition) {
+ TNode<BoolT> condition) {
return SelectConstant<Oddball>(condition, TrueConstant(), FalseConstant());
}
-TNode<Smi> CodeStubAssembler::SelectSmiConstant(SloppyTNode<BoolT> condition,
+TNode<Smi> CodeStubAssembler::SelectSmiConstant(TNode<BoolT> condition,
Smi true_value,
Smi false_value) {
return SelectConstant<Smi>(condition, SmiConstant(true_value),
@@ -770,8 +772,7 @@ TNode<Smi> CodeStubAssembler::TrySmiAbs(TNode<Smi> a, Label* if_overflow) {
}
}
-TNode<Number> CodeStubAssembler::NumberMax(SloppyTNode<Number> a,
- SloppyTNode<Number> b) {
+TNode<Number> CodeStubAssembler::NumberMax(TNode<Number> a, TNode<Number> b) {
// TODO(danno): This could be optimized by specifically handling smi cases.
TVARIABLE(Number, result);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
@@ -789,8 +790,7 @@ TNode<Number> CodeStubAssembler::NumberMax(SloppyTNode<Number> a,
return result.value();
}
-TNode<Number> CodeStubAssembler::NumberMin(SloppyTNode<Number> a,
- SloppyTNode<Number> b) {
+TNode<Number> CodeStubAssembler::NumberMin(TNode<Number> a, TNode<Number> b) {
// TODO(danno): This could be optimized by specifically handling smi cases.
TVARIABLE(Number, result);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
@@ -1006,7 +1006,7 @@ TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32(
return ReinterpretCast<Int32T>(value);
}
-TNode<BoolT> CodeStubAssembler::TaggedIsSmi(SloppyTNode<MaybeObject> a) {
+TNode<BoolT> CodeStubAssembler::TaggedIsSmi(TNode<MaybeObject> a) {
STATIC_ASSERT(kSmiTagMask < kMaxUInt32);
return Word32Equal(
Word32And(TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)),
@@ -1014,7 +1014,7 @@ TNode<BoolT> CodeStubAssembler::TaggedIsSmi(SloppyTNode<MaybeObject> a) {
Int32Constant(0));
}
-TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(SloppyTNode<MaybeObject> a) {
+TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(TNode<MaybeObject> a) {
return Word32BinaryNot(TaggedIsSmi(a));
}
@@ -1370,13 +1370,122 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(SloppyTNode<Object> value,
}
}
+TNode<ExternalPointerT> CodeStubAssembler::ChangeUint32ToExternalPointer(
+ TNode<Uint32T> value) {
+ STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
+ return ReinterpretCast<ExternalPointerT>(ChangeUint32ToWord(value));
+}
+
+TNode<Uint32T> CodeStubAssembler::ChangeExternalPointerToUint32(
+ TNode<ExternalPointerT> value) {
+ STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
+ return Unsigned(TruncateWordToInt32(ReinterpretCast<UintPtrT>(value)));
+}
+
+void CodeStubAssembler::InitializeExternalPointerField(TNode<HeapObject> object,
+ TNode<IntPtrT> offset) {
+#ifdef V8_HEAP_SANDBOX
+ TNode<ExternalReference> external_pointer_table_address = ExternalConstant(
+ ExternalReference::external_pointer_table_address(isolate()));
+ TNode<Uint32T> table_length = UncheckedCast<Uint32T>(
+ Load(MachineType::Uint32(), external_pointer_table_address,
+ UintPtrConstant(Internals::kExternalPointerTableLengthOffset)));
+ TNode<Uint32T> table_capacity = UncheckedCast<Uint32T>(
+ Load(MachineType::Uint32(), external_pointer_table_address,
+ UintPtrConstant(Internals::kExternalPointerTableCapacityOffset)));
+
+ Label grow_table(this, Label::kDeferred), finish(this);
+
+ TNode<BoolT> compare = Uint32LessThan(table_length, table_capacity);
+ Branch(compare, &finish, &grow_table);
+
+ BIND(&grow_table);
+ {
+ TNode<ExternalReference> table_grow_function = ExternalConstant(
+ ExternalReference::external_pointer_table_grow_table_function());
+ CallCFunction(
+ table_grow_function, MachineType::Pointer(),
+ std::make_pair(MachineType::Pointer(), external_pointer_table_address));
+ Goto(&finish);
+ }
+ BIND(&finish);
+
+ TNode<Uint32T> new_table_length = Uint32Add(table_length, Uint32Constant(1));
+ StoreNoWriteBarrier(
+ MachineRepresentation::kWord32, external_pointer_table_address,
+ UintPtrConstant(Internals::kExternalPointerTableLengthOffset),
+ new_table_length);
+
+ TNode<Uint32T> index = table_length;
+ TNode<ExternalPointerT> encoded = ChangeUint32ToExternalPointer(index);
+ StoreObjectFieldNoWriteBarrier<ExternalPointerT>(object, offset, encoded);
+#endif
+}
+
+TNode<RawPtrT> CodeStubAssembler::LoadExternalPointerFromObject(
+ TNode<HeapObject> object, TNode<IntPtrT> offset,
+ ExternalPointerTag external_pointer_tag) {
+#ifdef V8_HEAP_SANDBOX
+ TNode<ExternalReference> external_pointer_table_address = ExternalConstant(
+ ExternalReference::external_pointer_table_address(isolate()));
+ TNode<RawPtrT> table = UncheckedCast<RawPtrT>(
+ Load(MachineType::Pointer(), external_pointer_table_address,
+ UintPtrConstant(Internals::kExternalPointerTableBufferOffset)));
+
+ TNode<ExternalPointerT> encoded =
+ LoadObjectField<ExternalPointerT>(object, offset);
+ TNode<Word32T> index = ChangeExternalPointerToUint32(encoded);
+ // TODO(v8:10391, saelo): bounds check if table is not caged
+ TNode<IntPtrT> table_offset = ElementOffsetFromIndex(
+ ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0);
+
+ TNode<UintPtrT> entry = Load<UintPtrT>(table, table_offset);
+ if (external_pointer_tag != 0) {
+ TNode<UintPtrT> tag = UintPtrConstant(external_pointer_tag);
+ entry = UncheckedCast<UintPtrT>(WordXor(entry, tag));
+ }
+ return UncheckedCast<RawPtrT>(UncheckedCast<WordT>(entry));
+#else
+ return LoadObjectField<RawPtrT>(object, offset);
+#endif // V8_HEAP_SANDBOX
+}
+
+void CodeStubAssembler::StoreExternalPointerToObject(
+ TNode<HeapObject> object, TNode<IntPtrT> offset, TNode<RawPtrT> pointer,
+ ExternalPointerTag external_pointer_tag) {
+#ifdef V8_HEAP_SANDBOX
+ TNode<ExternalReference> external_pointer_table_address = ExternalConstant(
+ ExternalReference::external_pointer_table_address(isolate()));
+ TNode<RawPtrT> table = UncheckedCast<RawPtrT>(
+ Load(MachineType::Pointer(), external_pointer_table_address,
+ UintPtrConstant(Internals::kExternalPointerTableBufferOffset)));
+
+ TNode<ExternalPointerT> encoded =
+ LoadObjectField<ExternalPointerT>(object, offset);
+ TNode<Word32T> index = ChangeExternalPointerToUint32(encoded);
+ // TODO(v8:10391, saelo): bounds check if table is not caged
+ TNode<IntPtrT> table_offset = ElementOffsetFromIndex(
+ ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0);
+
+ TNode<UintPtrT> value = UncheckedCast<UintPtrT>(pointer);
+ if (external_pointer_tag != 0) {
+ TNode<UintPtrT> tag = UintPtrConstant(external_pointer_tag);
+ value = UncheckedCast<UintPtrT>(WordXor(pointer, tag));
+ }
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), table, table_offset,
+ value);
+#else
+ StoreObjectFieldNoWriteBarrier<RawPtrT>(object, offset, pointer);
+#endif // V8_HEAP_SANDBOX
+}
+
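The V8_HEAP_SANDBOX branches added above replace a raw pointer field with an index into a process-wide external pointer table whose entries are XOR-ed with a type tag. A standalone sketch of that indirection (illustrative types only, not V8 API; table growth and the bounds checks noted in the TODOs are elided):

#include <cstdint>
#include <vector>

using ExternalPointerTag = uint64_t;

struct ExternalPointerTable {
  std::vector<uint64_t> entries;

  // InitializeExternalPointerField: reserve a slot and return the index that
  // the object field will store.
  uint32_t Initialize() {
    entries.push_back(0);
    return static_cast<uint32_t>(entries.size() - 1);
  }
  // StoreExternalPointerToObject: write the tagged pointer into the slot.
  void Store(uint32_t index, uint64_t raw_pointer, ExternalPointerTag tag) {
    entries[index] = tag != 0 ? (raw_pointer ^ tag) : raw_pointer;
  }
  // LoadExternalPointerFromObject: strip the tag again; a load with the wrong
  // tag does not yield a usable pointer.
  uint64_t Load(uint32_t index, ExternalPointerTag tag) const {
    const uint64_t entry = entries[index];
    return tag != 0 ? (entry ^ tag) : entry;
  }
};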
TNode<Object> CodeStubAssembler::LoadFromParentFrame(int offset) {
TNode<RawPtrT> frame_pointer = LoadParentFramePointer();
return LoadFullTagged(frame_pointer, IntPtrConstant(offset));
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
- SloppyTNode<HeapObject> object, int offset) {
+ TNode<HeapObject> object, int offset) {
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += 4;
@@ -1388,7 +1497,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
}
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(
- SloppyTNode<HeapObject> object, int offset) {
+ TNode<HeapObject> object, int offset) {
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += 4;
@@ -1400,7 +1509,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(
}
TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue(
- SloppyTNode<HeapObject> object) {
+ TNode<HeapObject> object) {
CSA_ASSERT(this, Word32Or(IsHeapNumber(object), IsOddball(object)));
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
return LoadObjectField<Float64T>(object, HeapNumber::kValueOffset);
@@ -1413,27 +1522,26 @@ TNode<Map> CodeStubAssembler::GetInstanceTypeMap(InstanceType instance_type) {
return HeapConstant(map_handle);
}
-TNode<Map> CodeStubAssembler::LoadMap(SloppyTNode<HeapObject> object) {
+TNode<Map> CodeStubAssembler::LoadMap(TNode<HeapObject> object) {
return LoadObjectField<Map>(object, HeapObject::kMapOffset);
}
-TNode<Uint16T> CodeStubAssembler::LoadInstanceType(
- SloppyTNode<HeapObject> object) {
+TNode<Uint16T> CodeStubAssembler::LoadInstanceType(TNode<HeapObject> object) {
return LoadMapInstanceType(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::HasInstanceType(SloppyTNode<HeapObject> object,
+TNode<BoolT> CodeStubAssembler::HasInstanceType(TNode<HeapObject> object,
InstanceType instance_type) {
return InstanceTypeEqual(LoadInstanceType(object), instance_type);
}
TNode<BoolT> CodeStubAssembler::DoesntHaveInstanceType(
- SloppyTNode<HeapObject> object, InstanceType instance_type) {
+ TNode<HeapObject> object, InstanceType instance_type) {
return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type));
}
TNode<BoolT> CodeStubAssembler::TaggedDoesntHaveInstanceType(
- SloppyTNode<HeapObject> any_tagged, InstanceType type) {
+ TNode<HeapObject> any_tagged, InstanceType type) {
/* return Phi <TaggedIsSmi(val), DoesntHaveInstanceType(val, type)> */
TNode<BoolT> tagged_is_smi = TaggedIsSmi(any_tagged);
return Select<BoolT>(
@@ -1441,8 +1549,7 @@ TNode<BoolT> CodeStubAssembler::TaggedDoesntHaveInstanceType(
[=]() { return DoesntHaveInstanceType(any_tagged, type); });
}
-TNode<BoolT> CodeStubAssembler::IsSpecialReceiverMap(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsSpecialReceiverMap(TNode<Map> map) {
TNode<BoolT> is_special =
IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
uint32_t mask = Map::Bits1::HasNamedInterceptorBit::kMask |
@@ -1470,7 +1577,7 @@ void CodeStubAssembler::GotoIfMapHasSlowProperties(TNode<Map> map,
}
TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
- SloppyTNode<JSReceiver> object) {
+ TNode<JSReceiver> object) {
CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object))));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
return Select<HeapObject>(
@@ -1479,7 +1586,7 @@ TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
}
TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
- SloppyTNode<JSReceiver> object) {
+ TNode<JSReceiver> object) {
CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object)));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
return Select<HeapObject>(
@@ -1496,8 +1603,7 @@ TNode<Object> CodeStubAssembler::LoadJSArgumentsObjectLength(
return LoadObjectField(array, offset);
}
-TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(
- SloppyTNode<JSArray> array) {
+TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(TNode<JSArray> array) {
TNode<Number> length = LoadJSArrayLength(array);
CSA_ASSERT(this, Word32Or(IsFastElementsKind(LoadElementsKind(array)),
IsElementsKindInRange(
@@ -1510,13 +1616,13 @@ TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(
}
TNode<Smi> CodeStubAssembler::LoadFixedArrayBaseLength(
- SloppyTNode<FixedArrayBase> array) {
+ TNode<FixedArrayBase> array) {
CSA_SLOW_ASSERT(this, IsNotWeakFixedArraySubclass(array));
return LoadObjectField<Smi>(array, FixedArrayBase::kLengthOffset);
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagFixedArrayBaseLength(
- SloppyTNode<FixedArrayBase> array) {
+ TNode<FixedArrayBase> array) {
return LoadAndUntagObjectField(array, FixedArrayBase::kLengthOffset);
}
@@ -1532,7 +1638,7 @@ TNode<Smi> CodeStubAssembler::LoadWeakFixedArrayLength(
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagWeakFixedArrayLength(
- SloppyTNode<WeakFixedArray> array) {
+ TNode<WeakFixedArray> array) {
return LoadAndUntagObjectField(array, WeakFixedArray::kLengthOffset);
}
@@ -1548,59 +1654,48 @@ TNode<Int32T> CodeStubAssembler::LoadNumberOfOwnDescriptors(TNode<Map> map) {
DecodeWord32<Map::Bits3::NumberOfOwnDescriptorsBits>(bit_field3));
}
-TNode<Int32T> CodeStubAssembler::LoadMapBitField(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<Int32T> CodeStubAssembler::LoadMapBitField(TNode<Map> map) {
return UncheckedCast<Int32T>(
LoadObjectField<Uint8T>(map, Map::kBitFieldOffset));
}
-TNode<Int32T> CodeStubAssembler::LoadMapBitField2(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<Int32T> CodeStubAssembler::LoadMapBitField2(TNode<Map> map) {
return UncheckedCast<Int32T>(
LoadObjectField<Uint8T>(map, Map::kBitField2Offset));
}
-TNode<Uint32T> CodeStubAssembler::LoadMapBitField3(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<Uint32T> CodeStubAssembler::LoadMapBitField3(TNode<Map> map) {
return LoadObjectField<Uint32T>(map, Map::kBitField3Offset);
}
-TNode<Uint16T> CodeStubAssembler::LoadMapInstanceType(SloppyTNode<Map> map) {
+TNode<Uint16T> CodeStubAssembler::LoadMapInstanceType(TNode<Map> map) {
return LoadObjectField<Uint16T>(map, Map::kInstanceTypeOffset);
}
-TNode<Int32T> CodeStubAssembler::LoadMapElementsKind(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<Int32T> CodeStubAssembler::LoadMapElementsKind(TNode<Map> map) {
TNode<Int32T> bit_field2 = LoadMapBitField2(map);
return Signed(DecodeWord32<Map::Bits2::ElementsKindBits>(bit_field2));
}
-TNode<Int32T> CodeStubAssembler::LoadElementsKind(
- SloppyTNode<HeapObject> object) {
+TNode<Int32T> CodeStubAssembler::LoadElementsKind(TNode<HeapObject> object) {
return LoadMapElementsKind(LoadMap(object));
}
-TNode<DescriptorArray> CodeStubAssembler::LoadMapDescriptors(
- SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<DescriptorArray> CodeStubAssembler::LoadMapDescriptors(TNode<Map> map) {
return LoadObjectField<DescriptorArray>(map, Map::kInstanceDescriptorsOffset);
}
-TNode<HeapObject> CodeStubAssembler::LoadMapPrototype(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<HeapObject> CodeStubAssembler::LoadMapPrototype(TNode<Map> map) {
return LoadObjectField<HeapObject>(map, Map::kPrototypeOffset);
}
-TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSizeInWords(
- SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSizeInWords(TNode<Map> map) {
return ChangeInt32ToIntPtr(
LoadObjectField<Uint8T>(map, Map::kInstanceSizeInWordsOffset));
}
TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
- SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+ TNode<Map> map) {
// See Map::GetInObjectPropertiesStartInWords() for details.
CSA_ASSERT(this, IsJSObjectMap(map));
return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>(
@@ -1608,16 +1703,14 @@ TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
}
TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex(
- SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+ TNode<Map> map) {
// See Map::GetConstructorFunctionIndex() for details.
CSA_ASSERT(this, IsPrimitiveInstanceType(LoadMapInstanceType(map)));
return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>(
map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset));
}
-TNode<Object> CodeStubAssembler::LoadMapConstructor(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<Object> CodeStubAssembler::LoadMapConstructor(TNode<Map> map) {
TVARIABLE(Object, result,
LoadObjectField(
map, Map::kConstructorOrBackPointerOrNativeContextOffset));
@@ -1639,13 +1732,12 @@ TNode<Object> CodeStubAssembler::LoadMapConstructor(SloppyTNode<Map> map) {
return result.value();
}
-TNode<WordT> CodeStubAssembler::LoadMapEnumLength(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<WordT> CodeStubAssembler::LoadMapEnumLength(TNode<Map> map) {
TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
return DecodeWordFromWord32<Map::Bits3::EnumLengthBits>(bit_field3);
}
-TNode<Object> CodeStubAssembler::LoadMapBackPointer(SloppyTNode<Map> map) {
+TNode<Object> CodeStubAssembler::LoadMapBackPointer(TNode<Map> map) {
TNode<HeapObject> object = CAST(LoadObjectField(
map, Map::kConstructorOrBackPointerOrNativeContextOffset));
return Select<Object>(
@@ -1743,14 +1835,12 @@ TNode<Smi> CodeStubAssembler::LoadStringLengthAsSmi(TNode<String> string) {
return SmiFromIntPtr(LoadStringLengthAsWord(string));
}
-TNode<IntPtrT> CodeStubAssembler::LoadStringLengthAsWord(
- SloppyTNode<String> string) {
+TNode<IntPtrT> CodeStubAssembler::LoadStringLengthAsWord(TNode<String> string) {
return Signed(ChangeUint32ToWord(LoadStringLengthAsWord32(string)));
}
TNode<Uint32T> CodeStubAssembler::LoadStringLengthAsWord32(
- SloppyTNode<String> string) {
- CSA_ASSERT(this, IsString(string));
+ TNode<String> string) {
return LoadObjectField<Uint32T>(string, String::kLengthOffset);
}
@@ -2005,10 +2095,9 @@ TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength(
TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayDataPtr(
TNode<JSTypedArray> typed_array) {
- // Data pointer = DecodeExternalPointer(external_pointer) +
- // static_cast<Tagged_t>(base_pointer).
+ // Data pointer = external_pointer + static_cast<Tagged_t>(base_pointer).
TNode<RawPtrT> external_pointer =
- DecodeExternalPointer(LoadJSTypedArrayExternalPointer(typed_array));
+ LoadJSTypedArrayExternalPointerPtr(typed_array);
TNode<IntPtrT> base_pointer;
if (COMPRESS_POINTERS_BOOL) {
@@ -2467,21 +2556,20 @@ TNode<BoolT> CodeStubAssembler::LoadScopeInfoHasExtensionField(
}
void CodeStubAssembler::StoreContextElementNoWriteBarrier(
- SloppyTNode<Context> context, int slot_index, SloppyTNode<Object> value) {
+ TNode<Context> context, int slot_index, SloppyTNode<Object> value) {
int offset = Context::SlotOffset(slot_index);
StoreNoWriteBarrier(MachineRepresentation::kTagged, context,
IntPtrConstant(offset), value);
}
TNode<NativeContext> CodeStubAssembler::LoadNativeContext(
- SloppyTNode<Context> context) {
+ TNode<Context> context) {
TNode<Map> map = LoadMap(context);
return CAST(LoadObjectField(
map, Map::kConstructorOrBackPointerOrNativeContextOffset));
}
-TNode<Context> CodeStubAssembler::LoadModuleContext(
- SloppyTNode<Context> context) {
+TNode<Context> CodeStubAssembler::LoadModuleContext(TNode<Context> context) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> module_map = CAST(
LoadContextElement(native_context, Context::MODULE_CONTEXT_MAP_INDEX));
@@ -2524,7 +2612,7 @@ TNode<Map> CodeStubAssembler::LoadSlowObjectWithNullPrototypeMap(
}
TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
- SloppyTNode<Int32T> kind, SloppyTNode<NativeContext> native_context) {
+ SloppyTNode<Int32T> kind, TNode<NativeContext> native_context) {
CSA_ASSERT(this, IsFastElementsKind(kind));
TNode<IntPtrT> offset =
IntPtrAdd(IntPtrConstant(Context::FIRST_JS_ARRAY_MAP_SLOT),
@@ -2533,7 +2621,7 @@ TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
}
TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
- ElementsKind kind, SloppyTNode<NativeContext> native_context) {
+ ElementsKind kind, TNode<NativeContext> native_context) {
return UncheckedCast<Map>(
LoadContextElement(native_context, Context::ArrayMapIndex(kind)));
}
@@ -2606,7 +2694,7 @@ TNode<HeapObject> CodeStubAssembler::LoadJSFunctionPrototype(
}
TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
- SloppyTNode<SharedFunctionInfo> shared) {
+ TNode<SharedFunctionInfo> shared) {
TNode<HeapObject> function_data = LoadObjectField<HeapObject>(
shared, SharedFunctionInfo::kFunctionDataOffset);
@@ -3263,7 +3351,7 @@ TNode<NameDictionary> CodeStubAssembler::CopyNameDictionary(
template <typename CollectionType>
TNode<CollectionType> CodeStubAssembler::AllocateOrderedHashTable() {
- static const int kCapacity = CollectionType::kMinCapacity;
+ static const int kCapacity = CollectionType::kInitialCapacity;
static const int kBucketCount = kCapacity / CollectionType::kLoadFactor;
static const int kDataTableLength = kCapacity * CollectionType::kEntrySize;
static const int kFixedArrayLength =
@@ -3322,7 +3410,6 @@ TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap(
TNode<Map> map, base::Optional<TNode<HeapObject>> properties,
base::Optional<TNode<FixedArray>> elements, AllocationFlags flags,
SlackTrackingMode slack_tracking_mode) {
- CSA_ASSERT(this, IsMap(map));
CSA_ASSERT(this, Word32BinaryNot(IsJSFunctionMap(map)));
CSA_ASSERT(this, Word32BinaryNot(InstanceTypeEqual(LoadMapInstanceType(map),
JS_GLOBAL_OBJECT_TYPE)));
@@ -3340,7 +3427,6 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
base::Optional<TNode<HeapObject>> properties,
base::Optional<TNode<FixedArray>> elements,
SlackTrackingMode slack_tracking_mode) {
- CSA_SLOW_ASSERT(this, IsMap(map));
// This helper assumes that the object is in new-space, as guarded by the
// check in AllocatedJSObjectFromMap.
if (!properties) {
@@ -3370,7 +3456,7 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
}
void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
- SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ TNode<HeapObject> object, TNode<Map> map,
SloppyTNode<IntPtrT> instance_size, int start_offset) {
STATIC_ASSERT(Map::kNoSlackTracking == 0);
CSA_ASSERT(this, IsClearWord32<Map::Bits3::ConstructionCounterBits>(
@@ -3380,7 +3466,7 @@ void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
}
void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
- SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ TNode<HeapObject> object, TNode<Map> map,
SloppyTNode<IntPtrT> instance_size) {
Comment("InitializeJSObjectBodyNoSlackTracking");
@@ -3824,11 +3910,10 @@ template V8_EXPORT_PRIVATE TNode<FixedArrayBase>
template <typename TIndex>
TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
- SloppyTNode<FixedArrayBase> source, TNode<TIndex> first,
- TNode<TIndex> count, TNode<TIndex> capacity, TNode<Map> source_map,
- ElementsKind from_kind, AllocationFlags allocation_flags,
- ExtractFixedArrayFlags extract_flags, HoleConversionMode convert_holes,
- TVariable<BoolT>* var_holes_converted,
+ TNode<FixedArrayBase> source, TNode<TIndex> first, TNode<TIndex> count,
+ TNode<TIndex> capacity, TNode<Map> source_map, ElementsKind from_kind,
+ AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags,
+ HoleConversionMode convert_holes, TVariable<BoolT>* var_holes_converted,
base::Optional<TNode<Int32T>> source_elements_kind) {
static_assert(
std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
@@ -4046,8 +4131,8 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
Label if_hole(this);
- Node* value = LoadElementAndPrepareForStore(
- from_array, var_from_offset.value(), kind, kind, &if_hole);
+ TNode<Float64T> value = LoadDoubleWithHoleCheck(
+ from_array, var_from_offset.value(), &if_hole, MachineType::Float64());
StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array_adjusted,
to_offset, value);
@@ -4910,7 +4995,7 @@ TNode<Float64T> CodeStubAssembler::TryTaggedToFloat64(
}
TNode<Float64T> CodeStubAssembler::TruncateTaggedToFloat64(
- SloppyTNode<Context> context, SloppyTNode<Object> value) {
+ TNode<Context> context, SloppyTNode<Object> value) {
// We might need to loop once due to ToNumber conversion.
TVARIABLE(Object, var_value, value);
TVARIABLE(Float64T, var_result);
@@ -4940,7 +5025,7 @@ TNode<Float64T> CodeStubAssembler::TruncateTaggedToFloat64(
}
TNode<Word32T> CodeStubAssembler::TruncateTaggedToWord32(
- SloppyTNode<Context> context, SloppyTNode<Object> value) {
+ TNode<Context> context, SloppyTNode<Object> value) {
TVARIABLE(Word32T, var_result);
Label done(this);
TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumber>(context, value,
@@ -5586,13 +5671,11 @@ TNode<BoolT> CodeStubAssembler::InstanceTypeEqual(
return Word32Equal(instance_type, Int32Constant(type));
}
-TNode<BoolT> CodeStubAssembler::IsDictionaryMap(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsDictionaryMap(TNode<Map> map) {
return IsSetWord32<Map::Bits3::IsDictionaryMapBit>(LoadMapBitField3(map));
}
-TNode<BoolT> CodeStubAssembler::IsExtensibleMap(SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsExtensibleMap(TNode<Map> map) {
return IsSetWord32<Map::Bits3::IsExtensibleBit>(LoadMapBitField3(map));
}
@@ -5604,18 +5687,15 @@ TNode<BoolT> CodeStubAssembler::IsExtensibleNonPrototypeMap(TNode<Map> map) {
Int32Constant(kExpected));
}
-TNode<BoolT> CodeStubAssembler::IsCallableMap(SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsCallableMap(TNode<Map> map) {
return IsSetWord32<Map::Bits1::IsCallableBit>(LoadMapBitField(map));
}
-TNode<BoolT> CodeStubAssembler::IsDeprecatedMap(SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsDeprecatedMap(TNode<Map> map) {
return IsSetWord32<Map::Bits3::IsDeprecatedBit>(LoadMapBitField3(map));
}
-TNode<BoolT> CodeStubAssembler::IsUndetectableMap(SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsUndetectableMap(TNode<Map> map) {
return IsSetWord32<Map::Bits1::IsUndetectableBit>(LoadMapBitField(map));
}
@@ -5676,7 +5756,7 @@ TNode<BoolT> CodeStubAssembler::IsPromiseSpeciesProtectorCellInvalid() {
}
TNode<BoolT> CodeStubAssembler::IsPrototypeInitialArrayPrototype(
- SloppyTNode<Context> context, SloppyTNode<Map> map) {
+ TNode<Context> context, TNode<Map> map) {
const TNode<NativeContext> native_context = LoadNativeContext(context);
const TNode<Object> initial_array_prototype = LoadContextElement(
native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
@@ -5685,7 +5765,7 @@ TNode<BoolT> CodeStubAssembler::IsPrototypeInitialArrayPrototype(
}
TNode<BoolT> CodeStubAssembler::IsPrototypeTypedArrayPrototype(
- SloppyTNode<Context> context, SloppyTNode<Map> map) {
+ TNode<Context> context, TNode<Map> map) {
const TNode<NativeContext> native_context = LoadNativeContext(context);
const TNode<Object> typed_array_prototype =
LoadContextElement(native_context, Context::TYPED_ARRAY_PROTOTYPE_INDEX);
@@ -5736,22 +5816,19 @@ TNode<BoolT> CodeStubAssembler::TaggedIsCallable(TNode<Object> object) {
});
}
-TNode<BoolT> CodeStubAssembler::IsCallable(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsCallable(TNode<HeapObject> object) {
return IsCallableMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsConstructorMap(SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsConstructorMap(TNode<Map> map) {
return IsSetWord32<Map::Bits1::IsConstructorBit>(LoadMapBitField(map));
}
-TNode<BoolT> CodeStubAssembler::IsConstructor(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsConstructor(TNode<HeapObject> object) {
return IsConstructorMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsFunctionWithPrototypeSlotMap(
- SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsFunctionWithPrototypeSlotMap(TNode<Map> map) {
return IsSetWord32<Map::Bits1::HasPrototypeSlotBit>(LoadMapBitField(map));
}
@@ -5838,16 +5915,15 @@ TNode<BoolT> CodeStubAssembler::IsJSReceiverInstanceType(
Int32Constant(FIRST_JS_RECEIVER_TYPE));
}
-TNode<BoolT> CodeStubAssembler::IsJSReceiverMap(SloppyTNode<Map> map) {
+TNode<BoolT> CodeStubAssembler::IsJSReceiverMap(TNode<Map> map) {
return IsJSReceiverInstanceType(LoadMapInstanceType(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSReceiver(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSReceiver(TNode<HeapObject> object) {
return IsJSReceiverMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsNullOrJSReceiver(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsNullOrJSReceiver(TNode<HeapObject> object) {
return UncheckedCast<BoolT>(Word32Or(IsJSReceiver(object), IsNull(object)));
}
@@ -5860,12 +5936,11 @@ TNode<BoolT> CodeStubAssembler::IsJSGlobalProxyInstanceType(
return InstanceTypeEqual(instance_type, JS_GLOBAL_PROXY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSGlobalProxyMap(SloppyTNode<Map> map) {
+TNode<BoolT> CodeStubAssembler::IsJSGlobalProxyMap(TNode<Map> map) {
return IsJSGlobalProxyInstanceType(LoadMapInstanceType(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSGlobalProxy(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSGlobalProxy(TNode<HeapObject> object) {
return IsJSGlobalProxyMap(LoadMap(object));
}
@@ -5880,12 +5955,11 @@ TNode<BoolT> CodeStubAssembler::IsJSObjectInstanceType(
Int32Constant(FIRST_JS_OBJECT_TYPE));
}
-TNode<BoolT> CodeStubAssembler::IsJSObjectMap(SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsJSObjectMap(TNode<Map> map) {
return IsJSObjectInstanceType(LoadMapInstanceType(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSObject(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSObject(TNode<HeapObject> object) {
return IsJSObjectMap(LoadMap(object));
}
@@ -5899,30 +5973,28 @@ TNode<BoolT> CodeStubAssembler::IsJSFinalizationRegistry(
return IsJSFinalizationRegistryMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsJSPromiseMap(SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsJSPromiseMap(TNode<Map> map) {
return InstanceTypeEqual(LoadMapInstanceType(map), JS_PROMISE_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSPromise(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSPromise(TNode<HeapObject> object) {
return IsJSPromiseMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsJSProxy(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSProxy(TNode<HeapObject> object) {
return HasInstanceType(object, JS_PROXY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSStringIterator(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSStringIterator(TNode<HeapObject> object) {
return HasInstanceType(object, JS_STRING_ITERATOR_TYPE);
}
TNode<BoolT> CodeStubAssembler::IsJSRegExpStringIterator(
- SloppyTNode<HeapObject> object) {
+ TNode<HeapObject> object) {
return HasInstanceType(object, JS_REG_EXP_STRING_ITERATOR_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsMap(SloppyTNode<HeapObject> map) {
+TNode<BoolT> CodeStubAssembler::IsMap(TNode<HeapObject> map) {
return IsMetaMap(LoadMap(map));
}
@@ -5931,12 +6003,11 @@ TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperInstanceType(
return InstanceTypeEqual(instance_type, JS_PRIMITIVE_WRAPPER_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapper(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapper(TNode<HeapObject> object) {
return IsJSPrimitiveWrapperMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperMap(SloppyTNode<Map> map) {
+TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperMap(TNode<Map> map) {
return IsJSPrimitiveWrapperInstanceType(LoadMapInstanceType(map));
}
@@ -5945,30 +6016,28 @@ TNode<BoolT> CodeStubAssembler::IsJSArrayInstanceType(
return InstanceTypeEqual(instance_type, JS_ARRAY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSArray(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSArray(TNode<HeapObject> object) {
return IsJSArrayMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsJSArrayMap(SloppyTNode<Map> map) {
+TNode<BoolT> CodeStubAssembler::IsJSArrayMap(TNode<Map> map) {
return IsJSArrayInstanceType(LoadMapInstanceType(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSArrayIterator(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSArrayIterator(TNode<HeapObject> object) {
return HasInstanceType(object, JS_ARRAY_ITERATOR_TYPE);
}
TNode<BoolT> CodeStubAssembler::IsJSAsyncGeneratorObject(
- SloppyTNode<HeapObject> object) {
+ TNode<HeapObject> object) {
return HasInstanceType(object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsFixedArray(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsFixedArray(TNode<HeapObject> object) {
return HasInstanceType(object, FIXED_ARRAY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsFixedArraySubclass(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsFixedArraySubclass(TNode<HeapObject> object) {
TNode<Uint16T> instance_type = LoadInstanceType(object);
return UncheckedCast<BoolT>(
Word32And(Int32GreaterThanOrEqual(instance_type,
@@ -5978,7 +6047,7 @@ TNode<BoolT> CodeStubAssembler::IsFixedArraySubclass(
}
TNode<BoolT> CodeStubAssembler::IsNotWeakFixedArraySubclass(
- SloppyTNode<HeapObject> object) {
+ TNode<HeapObject> object) {
TNode<Uint16T> instance_type = LoadInstanceType(object);
return UncheckedCast<BoolT>(Word32Or(
Int32LessThan(instance_type, Int32Constant(FIRST_WEAK_FIXED_ARRAY_TYPE)),
@@ -5986,8 +6055,7 @@ TNode<BoolT> CodeStubAssembler::IsNotWeakFixedArraySubclass(
Int32Constant(LAST_WEAK_FIXED_ARRAY_TYPE))));
}
-TNode<BoolT> CodeStubAssembler::IsPropertyArray(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsPropertyArray(TNode<HeapObject> object) {
return HasInstanceType(object, PROPERTY_ARRAY_TYPE);
}
@@ -6008,7 +6076,7 @@ TNode<BoolT> CodeStubAssembler::IsPromiseReactionJobTask(
// TODO(jgruber): It might be worth creating an empty_double_array constant to
// simplify this case.
TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKindOrEmpty(
- SloppyTNode<FixedArrayBase> object, ElementsKind kind) {
+ TNode<FixedArrayBase> object, ElementsKind kind) {
Label out(this);
TVARIABLE(BoolT, var_result, Int32TrueConstant());
@@ -6024,8 +6092,8 @@ TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKindOrEmpty(
return var_result.value();
}
-TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKind(
- SloppyTNode<HeapObject> object, ElementsKind kind) {
+TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKind(TNode<HeapObject> object,
+ ElementsKind kind) {
if (IsDoubleElementsKind(kind)) {
return IsFixedDoubleArray(object);
} else {
@@ -6035,11 +6103,11 @@ TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKind(
}
}
-TNode<BoolT> CodeStubAssembler::IsBoolean(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsBoolean(TNode<HeapObject> object) {
return IsBooleanMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsPropertyCell(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsPropertyCell(TNode<HeapObject> object) {
return IsPropertyCellMap(LoadMap(object));
}
@@ -6048,7 +6116,7 @@ TNode<BoolT> CodeStubAssembler::IsHeapNumberInstanceType(
return InstanceTypeEqual(instance_type, HEAP_NUMBER_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsOddball(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsOddball(TNode<HeapObject> object) {
return IsOddballInstanceType(LoadInstanceType(object));
}
@@ -6057,7 +6125,7 @@ TNode<BoolT> CodeStubAssembler::IsOddballInstanceType(
return InstanceTypeEqual(instance_type, ODDBALL_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsName(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsName(TNode<HeapObject> object) {
return IsNameInstanceType(LoadInstanceType(object));
}
@@ -6066,7 +6134,7 @@ TNode<BoolT> CodeStubAssembler::IsNameInstanceType(
return Int32LessThanOrEqual(instance_type, Int32Constant(LAST_NAME_TYPE));
}
-TNode<BoolT> CodeStubAssembler::IsString(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsString(TNode<HeapObject> object) {
return IsStringInstanceType(LoadInstanceType(object));
}
@@ -6134,7 +6202,7 @@ TNode<BoolT> CodeStubAssembler::IsBigIntInstanceType(
return InstanceTypeEqual(instance_type, BIGINT_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsBigInt(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsBigInt(TNode<HeapObject> object) {
return IsBigIntInstanceType(LoadInstanceType(object));
}
@@ -6149,7 +6217,7 @@ TNode<BoolT> CodeStubAssembler::IsPrivateName(SloppyTNode<Symbol> symbol) {
return IsSetWord32<Symbol::IsPrivateNameBit>(flags);
}
-TNode<BoolT> CodeStubAssembler::IsHashTable(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsHashTable(TNode<HeapObject> object) {
TNode<Uint16T> instance_type = LoadInstanceType(object);
return UncheckedCast<BoolT>(
Word32And(Int32GreaterThanOrEqual(instance_type,
@@ -6158,23 +6226,19 @@ TNode<BoolT> CodeStubAssembler::IsHashTable(SloppyTNode<HeapObject> object) {
Int32Constant(LAST_HASH_TABLE_TYPE))));
}
-TNode<BoolT> CodeStubAssembler::IsEphemeronHashTable(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsEphemeronHashTable(TNode<HeapObject> object) {
return HasInstanceType(object, EPHEMERON_HASH_TABLE_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsNameDictionary(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsNameDictionary(TNode<HeapObject> object) {
return HasInstanceType(object, NAME_DICTIONARY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsGlobalDictionary(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsGlobalDictionary(TNode<HeapObject> object) {
return HasInstanceType(object, GLOBAL_DICTIONARY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsNumberDictionary(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsNumberDictionary(TNode<HeapObject> object) {
return HasInstanceType(object, NUMBER_DICTIONARY_TYPE);
}
@@ -6187,16 +6251,15 @@ TNode<BoolT> CodeStubAssembler::IsJSFunctionInstanceType(
return InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSFunction(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSFunction(TNode<HeapObject> object) {
return IsJSFunctionMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsJSBoundFunction(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSBoundFunction(TNode<HeapObject> object) {
return HasInstanceType(object, JS_BOUND_FUNCTION_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSFunctionMap(SloppyTNode<Map> map) {
+TNode<BoolT> CodeStubAssembler::IsJSFunctionMap(TNode<Map> map) {
return IsJSFunctionInstanceType(LoadMapInstanceType(map));
}
@@ -6205,16 +6268,15 @@ TNode<BoolT> CodeStubAssembler::IsJSTypedArrayInstanceType(
return InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSTypedArrayMap(SloppyTNode<Map> map) {
+TNode<BoolT> CodeStubAssembler::IsJSTypedArrayMap(TNode<Map> map) {
return IsJSTypedArrayInstanceType(LoadMapInstanceType(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSTypedArray(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSTypedArray(TNode<HeapObject> object) {
return IsJSTypedArrayMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsJSArrayBuffer(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSArrayBuffer(TNode<HeapObject> object) {
return HasInstanceType(object, JS_ARRAY_BUFFER_TYPE);
}
@@ -6222,7 +6284,7 @@ TNode<BoolT> CodeStubAssembler::IsJSDataView(TNode<HeapObject> object) {
return HasInstanceType(object, JS_DATA_VIEW_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSRegExp(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSRegExp(TNode<HeapObject> object) {
return HasInstanceType(object, JS_REG_EXP_TYPE);
}
@@ -6235,7 +6297,7 @@ TNode<BoolT> CodeStubAssembler::IsNumeric(SloppyTNode<Object> object) {
});
}
-TNode<BoolT> CodeStubAssembler::IsNumberNormalized(SloppyTNode<Number> number) {
+TNode<BoolT> CodeStubAssembler::IsNumberNormalized(TNode<Number> number) {
TVARIABLE(BoolT, var_result, Int32TrueConstant());
Label out(this);
@@ -6258,7 +6320,7 @@ TNode<BoolT> CodeStubAssembler::IsNumberNormalized(SloppyTNode<Number> number) {
return var_result.value();
}
-TNode<BoolT> CodeStubAssembler::IsNumberPositive(SloppyTNode<Number> number) {
+TNode<BoolT> CodeStubAssembler::IsNumberPositive(TNode<Number> number) {
return Select<BoolT>(
TaggedIsSmi(number), [=] { return TaggedIsPositiveSmi(number); },
[=] { return IsHeapNumberPositive(CAST(number)); });
@@ -6590,8 +6652,7 @@ TNode<RawPtrT> ToDirectStringAssembler::TryToSequential(
if_bailout);
TNode<String> string = var_string_.value();
- TNode<RawPtrT> result =
- DecodeExternalPointer(LoadExternalStringResourceData(CAST(string)));
+ TNode<RawPtrT> result = LoadExternalStringResourceDataPtr(CAST(string));
if (ptr_kind == PTR_TO_STRING) {
result = RawPtrSub(result, IntPtrConstant(SeqOneByteString::kHeaderSize -
kHeapObjectTag));
@@ -6756,174 +6817,154 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
return result.value();
}
-// TODO(solanes, v8:6949): Refactor this to check for JSReceivers first. If we
-// have a JSReceiver, extract the primitive and fallthrough. Otherwise, continue
-// asking for the other instance types. This will make it so that we can remove
-// the loop (which was looping at most once). Also, see if we can make use of
-// PlainPrimitiveNonNumberToNumber to de-duplicate code, maybe changing it to a
-// TryPlainPrimitiveNonNumberToNumber with a Label* as a parameter.
TNode<Numeric> CodeStubAssembler::NonNumberToNumberOrNumeric(
TNode<Context> context, TNode<HeapObject> input, Object::Conversion mode,
BigIntHandling bigint_handling) {
CSA_ASSERT(this, Word32BinaryNot(IsHeapNumber(input)));
- // We might need to loop once here due to ToPrimitive conversions.
TVARIABLE(HeapObject, var_input, input);
TVARIABLE(Numeric, var_result);
- Label loop(this, &var_input);
- Label end(this);
- Goto(&loop);
- BIND(&loop);
- {
- // Load the current {input} value (known to be a HeapObject).
- TNode<HeapObject> input = var_input.value();
-
- // Dispatch on the {input} instance type.
- TNode<Uint16T> input_instance_type = LoadInstanceType(input);
- Label if_inputisstring(this), if_inputisoddball(this),
- if_inputisbigint(this), if_inputisreceiver(this, Label::kDeferred),
- if_inputisother(this, Label::kDeferred);
- GotoIf(IsStringInstanceType(input_instance_type), &if_inputisstring);
- GotoIf(IsBigIntInstanceType(input_instance_type), &if_inputisbigint);
- GotoIf(InstanceTypeEqual(input_instance_type, ODDBALL_TYPE),
- &if_inputisoddball);
- Branch(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver,
- &if_inputisother);
-
- BIND(&if_inputisstring);
+ TVARIABLE(Uint16T, instance_type, LoadInstanceType(var_input.value()));
+ Label end(this), if_inputisreceiver(this, Label::kDeferred),
+ if_inputisnotreceiver(this);
+
+ // We need to handle JSReceiver first since we might need to do two
+ // conversions due to ToPrimitive.
+ Branch(IsJSReceiverInstanceType(instance_type.value()), &if_inputisreceiver,
+ &if_inputisnotreceiver);
+
+ BIND(&if_inputisreceiver);
+ {
+ // The {var_input.value()} is a JSReceiver, so we need to convert it to a
+ // Primitive first using the ToPrimitive type conversion, preferably
+ // yielding a Number.
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ isolate(), ToPrimitiveHint::kNumber);
+ TNode<Object> result = CallStub(callable, context, var_input.value());
+
+ // Check if the {result} is already a Number/Numeric.
+ Label if_done(this), if_notdone(this);
+ Branch(mode == Object::Conversion::kToNumber ? IsNumber(result)
+ : IsNumeric(result),
+ &if_done, &if_notdone);
+
+ BIND(&if_done);
{
- // The {input} is a String, use the fast stub to convert it to a Number.
- TNode<String> string_input = CAST(input);
- var_result = StringToNumber(string_input);
+ // The ToPrimitive conversion already gave us a Number/Numeric, so
+ // we're done.
+ var_result = CAST(result);
Goto(&end);
}
- BIND(&if_inputisbigint);
- if (mode == Object::Conversion::kToNumeric) {
- var_result = CAST(input);
- Goto(&end);
- } else {
- DCHECK_EQ(mode, Object::Conversion::kToNumber);
- if (bigint_handling == BigIntHandling::kThrow) {
- Goto(&if_inputisother);
- } else {
- DCHECK_EQ(bigint_handling, BigIntHandling::kConvertToNumber);
- var_result =
- CAST(CallRuntime(Runtime::kBigIntToNumber, context, input));
- Goto(&end);
- }
- }
-
- BIND(&if_inputisoddball);
+ BIND(&if_notdone);
{
- // The {input} is an Oddball, we just need to load the Number value of it.
- var_result = LoadObjectField<Number>(input, Oddball::kToNumberOffset);
- Goto(&end);
+ // We now have a Primitive {result}, but it's not yet a
+ // Number/Numeric.
+ var_input = CAST(result);
+ // We have a new input. Redo the check and reload instance_type.
+ CSA_ASSERT(this, Word32BinaryNot(IsHeapNumber(var_input.value())));
+ instance_type = LoadInstanceType(var_input.value());
+ Goto(&if_inputisnotreceiver);
}
+ }
- BIND(&if_inputisreceiver);
+ BIND(&if_inputisnotreceiver);
+ {
+ Label not_plain_primitive(this), if_inputisbigint(this),
+ if_inputisother(this, Label::kDeferred);
+
+ // String and Oddball cases.
+ TVARIABLE(Number, var_result_number);
+ TryPlainPrimitiveNonNumberToNumber(var_input.value(), &var_result_number,
+ &not_plain_primitive);
+ var_result = var_result_number.value();
+ Goto(&end);
+
+ BIND(&not_plain_primitive);
{
- // The {input} is a JSReceiver, we need to convert it to a Primitive
- // first using the ToPrimitive type conversion, preferably yielding a
- // Number.
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(
- isolate(), ToPrimitiveHint::kNumber);
- TNode<Object> result = CallStub(callable, context, input);
-
- // Check if the {result} is already a Number/Numeric.
- Label if_done(this), if_notdone(this);
- Branch(mode == Object::Conversion::kToNumber ? IsNumber(result)
- : IsNumeric(result),
- &if_done, &if_notdone);
-
- BIND(&if_done);
+ Branch(IsBigIntInstanceType(instance_type.value()), &if_inputisbigint,
+ &if_inputisother);
+
+ BIND(&if_inputisbigint);
{
- // The ToPrimitive conversion already gave us a Number/Numeric, so
- // we're done.
- var_result = CAST(result);
- Goto(&end);
+ if (mode == Object::Conversion::kToNumeric) {
+ var_result = CAST(var_input.value());
+ Goto(&end);
+ } else {
+ DCHECK_EQ(mode, Object::Conversion::kToNumber);
+ if (bigint_handling == BigIntHandling::kThrow) {
+ Goto(&if_inputisother);
+ } else {
+ DCHECK_EQ(bigint_handling, BigIntHandling::kConvertToNumber);
+ var_result = CAST(CallRuntime(Runtime::kBigIntToNumber, context,
+ var_input.value()));
+ Goto(&end);
+ }
+ }
}
- BIND(&if_notdone);
+ BIND(&if_inputisother);
{
- // We now have a Primitive {result}, but it's not yet a
- // Number/Numeric.
- var_input = CAST(result);
- Goto(&loop);
+ // The {var_input.value()} is something else (e.g. Symbol); let the
+ // runtime figure out the correct exception. Note: We cannot tail call
+ // to the runtime here, as js-to-wasm trampolines also use this code
+ // currently, and they declare all outgoing parameters as untagged,
+ // while we would push a tagged object here.
+ auto function_id = mode == Object::Conversion::kToNumber
+ ? Runtime::kToNumber
+ : Runtime::kToNumeric;
+ var_result = CAST(CallRuntime(function_id, context, var_input.value()));
+ Goto(&end);
}
}
-
- BIND(&if_inputisother);
- {
- // The {input} is something else (e.g. Symbol), let the runtime figure
- // out the correct exception.
- // Note: We cannot tail call to the runtime here, as js-to-wasm
- // trampolines also use this code currently, and they declare all
- // outgoing parameters as untagged, while we would push a tagged
- // object here.
- auto function_id = mode == Object::Conversion::kToNumber
- ? Runtime::kToNumber
- : Runtime::kToNumeric;
- var_result = CAST(CallRuntime(function_id, context, input));
- Goto(&end);
- }
}
BIND(&end);
if (mode == Object::Conversion::kToNumber) {
CSA_ASSERT(this, IsNumber(var_result.value()));
- } else {
- DCHECK_EQ(mode, Object::Conversion::kToNumeric);
}
return var_result.value();
}
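For orientation, a minimal caller-side sketch of the restructured conversion above, assuming the usual CodeStubAssembler generator context; it uses only entry points whose signatures appear in this diff (TaggedIsSmi, SmiToNumber, IsHeapNumber, NonNumberToNumber), and the helper name and the explicit BigIntHandling choice are illustrative, not part of this change.

TNode<Number> ToNumberSketch(TNode<Context> context, TNode<Object> input) {
  TVARIABLE(Number, var_result);
  Label done(this), if_not_smi(this, Label::kDeferred);
  // Smi fast path: a Smi is already a Number.
  GotoIfNot(TaggedIsSmi(input), &if_not_smi);
  var_result = SmiToNumber(CAST(input));
  Goto(&done);

  BIND(&if_not_smi);
  {
    TNode<HeapObject> heap_input = CAST(input);
    Label not_heap_number(this);
    GotoIfNot(IsHeapNumber(heap_input), &not_heap_number);
    // A HeapNumber is already a Number.
    var_result = CAST(heap_input);
    Goto(&done);

    BIND(&not_heap_number);
    // String/Oddball/BigInt/JSReceiver all funnel into the routine above,
    // which now performs the JSReceiver ToPrimitive step at most once.
    var_result =
        NonNumberToNumber(context, heap_input, BigIntHandling::kThrow);
    Goto(&done);
  }

  BIND(&done);
  return var_result.value();
}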
TNode<Number> CodeStubAssembler::NonNumberToNumber(
- TNode<Context> context, SloppyTNode<HeapObject> input,
+ TNode<Context> context, TNode<HeapObject> input,
BigIntHandling bigint_handling) {
return CAST(NonNumberToNumberOrNumeric(
context, input, Object::Conversion::kToNumber, bigint_handling));
}
-TNode<Number> CodeStubAssembler::PlainPrimitiveNonNumberToNumber(
- TNode<HeapObject> input) {
+void CodeStubAssembler::TryPlainPrimitiveNonNumberToNumber(
+ TNode<HeapObject> input, TVariable<Number>* var_result, Label* if_bailout) {
CSA_ASSERT(this, Word32BinaryNot(IsHeapNumber(input)));
- TVARIABLE(Number, var_result);
Label done(this);
// Dispatch on the {input} instance type.
TNode<Uint16T> input_instance_type = LoadInstanceType(input);
- Label if_inputisstring(this), if_inputisoddball(this);
+ Label if_inputisstring(this);
GotoIf(IsStringInstanceType(input_instance_type), &if_inputisstring);
- CSA_ASSERT(this, InstanceTypeEqual(input_instance_type, ODDBALL_TYPE));
- Goto(&if_inputisoddball);
+ GotoIfNot(InstanceTypeEqual(input_instance_type, ODDBALL_TYPE), if_bailout);
+
+ // The {input} is an Oddball, so we just need to load its Number value.
+ *var_result = LoadObjectField<Number>(input, Oddball::kToNumberOffset);
+ Goto(&done);
BIND(&if_inputisstring);
{
// The {input} is a String, use the fast stub to convert it to a Number.
- TNode<String> string_input = CAST(input);
- var_result = StringToNumber(string_input);
- Goto(&done);
- }
-
- BIND(&if_inputisoddball);
- {
- // The {input} is an Oddball, we just need to load the Number value of it.
- var_result = LoadObjectField<Number>(input, Oddball::kToNumberOffset);
+ *var_result = StringToNumber(CAST(input));
Goto(&done);
}
BIND(&done);
- return var_result.value();
}
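A hedged sketch of the new bailout-style helper above: a caller that cannot rule out Symbols, BigInts or JSReceivers might pair it with a runtime fallback roughly as follows (heap_input, context and the choice of Runtime::kToNumber as the fallback are illustrative, not code from this change).

  TVARIABLE(Number, var_number);
  Label done(this), slow(this, Label::kDeferred);
  // Fast path: Strings and Oddballs are converted inline; anything else
  // jumps to the bailout label.
  TryPlainPrimitiveNonNumberToNumber(heap_input, &var_number, &slow);
  Goto(&done);

  BIND(&slow);
  // Assumed fallback: defer to the full runtime conversion.
  var_number = CAST(CallRuntime(Runtime::kToNumber, context, heap_input));
  Goto(&done);

  BIND(&done);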
-TNode<Numeric> CodeStubAssembler::NonNumberToNumeric(
- TNode<Context> context, SloppyTNode<HeapObject> input) {
+TNode<Numeric> CodeStubAssembler::NonNumberToNumeric(TNode<Context> context,
+ TNode<HeapObject> input) {
return NonNumberToNumberOrNumeric(context, input,
Object::Conversion::kToNumeric);
}
-TNode<Number> CodeStubAssembler::ToNumber_Inline(SloppyTNode<Context> context,
+TNode<Number> CodeStubAssembler::ToNumber_Inline(TNode<Context> context,
SloppyTNode<Object> input) {
TVARIABLE(Number, var_result);
Label end(this), not_smi(this, Label::kDeferred);
@@ -6982,7 +7023,7 @@ TNode<Number> CodeStubAssembler::ToNumber(TNode<Context> context,
TNode<Number> CodeStubAssembler::PlainPrimitiveToNumber(TNode<Object> input) {
TVARIABLE(Number, var_result);
- Label end(this);
+ Label end(this), fallback(this);
Label not_smi(this, Label::kDeferred);
GotoIfNot(TaggedIsSmi(input), &not_smi);
@@ -7002,8 +7043,10 @@ TNode<Number> CodeStubAssembler::PlainPrimitiveToNumber(TNode<Object> input) {
BIND(&not_heap_number);
{
- var_result = PlainPrimitiveNonNumberToNumber(input_ho);
+ TryPlainPrimitiveNonNumberToNumber(input_ho, &var_result, &fallback);
Goto(&end);
+ BIND(&fallback);
+ Unreachable();
}
}
@@ -7090,7 +7133,7 @@ void CodeStubAssembler::TaggedToNumeric(TNode<Context> context,
}
// ES#sec-touint32
-TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
+TNode<Number> CodeStubAssembler::ToUint32(TNode<Context> context,
SloppyTNode<Object> input) {
const TNode<Float64T> float_zero = Float64Constant(0.0);
const TNode<Float64T> float_two_32 =
@@ -7193,7 +7236,7 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
return CAST(var_result.value());
}
-TNode<String> CodeStubAssembler::ToString_Inline(SloppyTNode<Context> context,
+TNode<String> CodeStubAssembler::ToString_Inline(TNode<Context> context,
SloppyTNode<Object> input) {
TVARIABLE(Object, var_result, input);
Label stub_call(this, Label::kDeferred), out(this);
@@ -7209,7 +7252,7 @@ TNode<String> CodeStubAssembler::ToString_Inline(SloppyTNode<Context> context,
return CAST(var_result.value());
}
-TNode<JSReceiver> CodeStubAssembler::ToObject(SloppyTNode<Context> context,
+TNode<JSReceiver> CodeStubAssembler::ToObject(TNode<Context> context,
SloppyTNode<Object> input) {
return CAST(CallBuiltin(Builtins::kToObject, context, input));
}
@@ -7238,7 +7281,7 @@ TNode<JSReceiver> CodeStubAssembler::ToObject_Inline(TNode<Context> context,
return result.value();
}
-TNode<Number> CodeStubAssembler::ToLength_Inline(SloppyTNode<Context> context,
+TNode<Number> CodeStubAssembler::ToLength_Inline(TNode<Context> context,
SloppyTNode<Object> input) {
TNode<Smi> smi_zero = SmiConstant(0);
return Select<Number>(
@@ -7252,7 +7295,7 @@ TNode<Object> CodeStubAssembler::OrdinaryToPrimitive(
return CallStub(callable, context, input);
}
-TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
+TNode<Uint32T> CodeStubAssembler::DecodeWord32(TNode<Word32T> word32,
uint32_t shift, uint32_t mask) {
DCHECK_EQ((mask >> shift) << shift, mask);
return Unsigned(Word32And(Word32Shr(word32, static_cast<int>(shift)),
@@ -7445,7 +7488,7 @@ void CodeStubAssembler::TryToName(SloppyTNode<Object> key, Label* if_keyisindex,
}
void CodeStubAssembler::TryInternalizeString(
- SloppyTNode<String> string, Label* if_index, TVariable<IntPtrT>* var_index,
+ TNode<String> string, Label* if_index, TVariable<IntPtrT>* var_index,
Label* if_internalized, TVariable<Name>* var_internalized,
Label* if_not_internalized, Label* if_bailout) {
TNode<ExternalReference> function = ExternalConstant(
@@ -8329,10 +8372,12 @@ TNode<NativeContext> CodeStubAssembler::GetCreationContext(
return native_context;
}
-void CodeStubAssembler::DescriptorLookup(
- SloppyTNode<Name> unique_name, SloppyTNode<DescriptorArray> descriptors,
- SloppyTNode<Uint32T> bitfield3, Label* if_found,
- TVariable<IntPtrT>* var_name_index, Label* if_not_found) {
+void CodeStubAssembler::DescriptorLookup(TNode<Name> unique_name,
+ TNode<DescriptorArray> descriptors,
+ TNode<Uint32T> bitfield3,
+ Label* if_found,
+ TVariable<IntPtrT>* var_name_index,
+ Label* if_not_found) {
Comment("DescriptorArrayLookup");
TNode<Uint32T> nof =
DecodeWord32<Map::Bits3::NumberOfOwnDescriptorsBits>(bitfield3);
@@ -8340,9 +8385,11 @@ void CodeStubAssembler::DescriptorLookup(
var_name_index, if_not_found);
}
-void CodeStubAssembler::TransitionLookup(
- SloppyTNode<Name> unique_name, SloppyTNode<TransitionArray> transitions,
- Label* if_found, TVariable<IntPtrT>* var_name_index, Label* if_not_found) {
+void CodeStubAssembler::TransitionLookup(TNode<Name> unique_name,
+ TNode<TransitionArray> transitions,
+ Label* if_found,
+ TVariable<IntPtrT>* var_name_index,
+ Label* if_not_found) {
Comment("TransitionArrayLookup");
TNode<Uint32T> number_of_valid_transitions =
NumberOfEntries<TransitionArray>(transitions);
@@ -8409,11 +8456,11 @@ void CodeStubAssembler::TryLookupPropertyInSimpleObject(
}
void CodeStubAssembler::TryLookupProperty(
- SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
- SloppyTNode<Int32T> instance_type, SloppyTNode<Name> unique_name,
- Label* if_found_fast, Label* if_found_dict, Label* if_found_global,
- TVariable<HeapObject>* var_meta_storage, TVariable<IntPtrT>* var_name_index,
- Label* if_not_found, Label* if_bailout) {
+ TNode<HeapObject> object, TNode<Map> map, SloppyTNode<Int32T> instance_type,
+ TNode<Name> unique_name, Label* if_found_fast, Label* if_found_dict,
+ Label* if_found_global, TVariable<HeapObject>* var_meta_storage,
+ TVariable<IntPtrT>* var_name_index, Label* if_not_found,
+ Label* if_bailout) {
Label if_objectisspecial(this);
GotoIf(IsSpecialReceiverInstanceType(instance_type), &if_objectisspecial);
@@ -8745,22 +8792,21 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
}
void CodeStubAssembler::TryGetOwnProperty(
- TNode<Context> context, TNode<HeapObject> receiver,
- TNode<JSReceiver> object, TNode<Map> map, TNode<Int32T> instance_type,
- TNode<Name> unique_name, Label* if_found_value,
- TVariable<Object>* var_value, Label* if_not_found, Label* if_bailout) {
+ TNode<Context> context, TNode<Object> receiver, TNode<JSReceiver> object,
+ TNode<Map> map, TNode<Int32T> instance_type, TNode<Name> unique_name,
+ Label* if_found_value, TVariable<Object>* var_value, Label* if_not_found,
+ Label* if_bailout) {
TryGetOwnProperty(context, receiver, object, map, instance_type, unique_name,
if_found_value, var_value, nullptr, nullptr, if_not_found,
if_bailout, kCallJSGetter);
}
void CodeStubAssembler::TryGetOwnProperty(
- TNode<Context> context, TNode<HeapObject> receiver,
- TNode<JSReceiver> object, TNode<Map> map, TNode<Int32T> instance_type,
- TNode<Name> unique_name, Label* if_found_value,
- TVariable<Object>* var_value, TVariable<Uint32T>* var_details,
- TVariable<Object>* var_raw_value, Label* if_not_found, Label* if_bailout,
- GetOwnPropertyMode mode) {
+ TNode<Context> context, TNode<Object> receiver, TNode<JSReceiver> object,
+ TNode<Map> map, TNode<Int32T> instance_type, TNode<Name> unique_name,
+ Label* if_found_value, TVariable<Object>* var_value,
+ TVariable<Uint32T>* var_details, TVariable<Object>* var_raw_value,
+ Label* if_not_found, Label* if_bailout, GetOwnPropertyMode mode) {
DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
Comment("TryGetOwnProperty");
CSA_ASSERT(this, IsUniqueNameNoCachedIndex(unique_name));
@@ -9306,14 +9352,14 @@ TNode<BoolT> CodeStubAssembler::IsOffsetInBounds(SloppyTNode<IntPtrT> offset,
}
TNode<HeapObject> CodeStubAssembler::LoadFeedbackCellValue(
- SloppyTNode<JSFunction> closure) {
+ TNode<JSFunction> closure) {
TNode<FeedbackCell> feedback_cell =
LoadObjectField<FeedbackCell>(closure, JSFunction::kFeedbackCellOffset);
return LoadObjectField<HeapObject>(feedback_cell, FeedbackCell::kValueOffset);
}
TNode<HeapObject> CodeStubAssembler::LoadFeedbackVector(
- SloppyTNode<JSFunction> closure) {
+ TNode<JSFunction> closure) {
TVARIABLE(HeapObject, maybe_vector, LoadFeedbackCellValue(closure));
Label done(this);
@@ -9331,7 +9377,7 @@ TNode<HeapObject> CodeStubAssembler::LoadFeedbackVector(
}
TNode<ClosureFeedbackCellArray> CodeStubAssembler::LoadClosureFeedbackArray(
- SloppyTNode<JSFunction> closure) {
+ TNode<JSFunction> closure) {
TVARIABLE(HeapObject, feedback_cell_array, LoadFeedbackCellValue(closure));
Label end(this);
@@ -9417,7 +9463,7 @@ void CodeStubAssembler::CombineFeedback(TVariable<Smi>* existing_feedback,
*existing_feedback = SmiOr(existing_feedback->value(), feedback);
}
-void CodeStubAssembler::CheckForAssociatedProtector(SloppyTNode<Name> name,
+void CodeStubAssembler::CheckForAssociatedProtector(TNode<Name> name,
Label* if_protector) {
// This list must be kept in sync with LookupIterator::UpdateProtector!
// TODO(jkummerow): Would it be faster to have a bit in Symbol::flags()?
@@ -9522,14 +9568,19 @@ MachineRepresentation ElementsKindToMachineRepresentation(ElementsKind kind) {
} // namespace
-template <typename TIndex>
-void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
- TNode<TIndex> index, Node* value) {
+template <typename TArray, typename TIndex>
+void CodeStubAssembler::StoreElementBigIntOrTypedArray(TNode<TArray> elements,
+ ElementsKind kind,
+ TNode<TIndex> index,
+ Node* value) {
// TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants?
static_assert(std::is_same<TIndex, Smi>::value ||
std::is_same<TIndex, UintPtrT>::value ||
std::is_same<TIndex, IntPtrT>::value,
"Only Smi, UintPtrT or IntPtrT index is allowed");
+ static_assert(std::is_same<TArray, RawPtrT>::value ||
+ std::is_same<TArray, FixedArrayBase>::value,
+ "Only RawPtrT or FixedArrayBase elements are allowed");
if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
TNode<IntPtrT> offset = ElementOffsetFromIndex(index, kind, 0);
TVARIABLE(UintPtrT, var_low);
@@ -9555,7 +9606,8 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
var_high.value());
}
#endif
- } else if (IsTypedArrayElementsKind(kind)) {
+ } else {
+ DCHECK(IsTypedArrayElementsKind(kind));
if (kind == UINT8_CLAMPED_ELEMENTS) {
CSA_ASSERT(this, Word32Equal(UncheckedCast<Word32T>(value),
Word32And(Int32Constant(0xFF), value)));
@@ -9564,7 +9616,16 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
// TODO(cbruni): Add OOB check once typed.
MachineRepresentation rep = ElementsKindToMachineRepresentation(kind);
StoreNoWriteBarrier(rep, elements, offset, value);
- return;
+ }
+}
+
+template <typename TIndex>
+void CodeStubAssembler::StoreElement(TNode<FixedArrayBase> elements,
+ ElementsKind kind, TNode<TIndex> index,
+ Node* value) {
+ if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS ||
+ IsTypedArrayElementsKind(kind)) {
+ StoreElementBigIntOrTypedArray(elements, kind, index, value);
} else if (IsDoubleElementsKind(kind)) {
TNode<Float64T> value_float64 = UncheckedCast<Float64T>(value);
StoreFixedDoubleArrayElement(CAST(elements), index, value_float64);
@@ -9576,14 +9637,15 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
}
}
-template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement<Smi>(
- Node*, ElementsKind, TNode<Smi>, Node*);
-
-template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement<IntPtrT>(
- Node*, ElementsKind, TNode<IntPtrT>, Node*);
-
+template <typename TIndex>
+void CodeStubAssembler::StoreElement(TNode<RawPtrT> elements, ElementsKind kind,
+ TNode<TIndex> index, Node* value) {
+ DCHECK(kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS ||
+ IsTypedArrayElementsKind(kind));
+ StoreElementBigIntOrTypedArray(elements, kind, index, value);
+}
template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement<UintPtrT>(
- Node*, ElementsKind, TNode<UintPtrT>, Node*);
+ TNode<RawPtrT>, ElementsKind, TNode<UintPtrT>, Node*);
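To make the split above concrete, a hedged usage sketch of the two StoreElement overloads; every operand name here is illustrative (none are defined in this hunk), and the FixedArrayBase instantiations are assumed to be emitted elsewhere.

  // Typed-array and BigInt element kinds go through the raw-pointer overload;
  // ta_data is assumed to be the typed array's off-heap data pointer.
  StoreElement(ta_data /* TNode<RawPtrT> */, UINT32_ELEMENTS,
               index_uintptr /* TNode<UintPtrT> */, untagged_value);

  // Ordinary fast kinds go through the FixedArrayBase overload, which
  // dispatches to StoreFixedDoubleArrayElement / StoreFixedArrayElement.
  StoreElement(elements /* TNode<FixedArrayBase> */, PACKED_SMI_ELEMENTS,
               index_intptr /* TNode<IntPtrT> */, smi_value);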
TNode<Uint8T> CodeStubAssembler::Int32ToUint8Clamped(
TNode<Int32T> int32_value) {
@@ -10346,14 +10408,20 @@ TNode<TIndex> CodeStubAssembler::BuildFastLoop(const VariableList& vars,
}
// Instantiate BuildFastLoop for IntPtrT and UintPtrT.
-template TNode<IntPtrT> CodeStubAssembler::BuildFastLoop<IntPtrT>(
- const VariableList& vars, TNode<IntPtrT> start_index,
- TNode<IntPtrT> end_index, const FastLoopBody<IntPtrT>& body, int increment,
- IndexAdvanceMode advance_mode);
-template TNode<UintPtrT> CodeStubAssembler::BuildFastLoop<UintPtrT>(
- const VariableList& vars, TNode<UintPtrT> start_index,
- TNode<UintPtrT> end_index, const FastLoopBody<UintPtrT>& body,
- int increment, IndexAdvanceMode advance_mode);
+template V8_EXPORT_PRIVATE TNode<IntPtrT>
+CodeStubAssembler::BuildFastLoop<IntPtrT>(const VariableList& vars,
+ TNode<IntPtrT> start_index,
+ TNode<IntPtrT> end_index,
+ const FastLoopBody<IntPtrT>& body,
+ int increment,
+ IndexAdvanceMode advance_mode);
+template V8_EXPORT_PRIVATE TNode<UintPtrT>
+CodeStubAssembler::BuildFastLoop<UintPtrT>(const VariableList& vars,
+ TNode<UintPtrT> start_index,
+ TNode<UintPtrT> end_index,
+ const FastLoopBody<UintPtrT>& body,
+ int increment,
+ IndexAdvanceMode advance_mode);
template <typename TIndex>
void CodeStubAssembler::BuildFastArrayForEach(
@@ -10430,12 +10498,11 @@ void CodeStubAssembler::InitializeFieldsWithRoot(TNode<HeapObject> object,
-kTaggedSize, CodeStubAssembler::IndexAdvanceMode::kPre);
}
-void CodeStubAssembler::BranchIfNumberRelationalComparison(
- Operation op, SloppyTNode<Number> left, SloppyTNode<Number> right,
- Label* if_true, Label* if_false) {
- CSA_SLOW_ASSERT(this, IsNumber(left));
- CSA_SLOW_ASSERT(this, IsNumber(right));
-
+void CodeStubAssembler::BranchIfNumberRelationalComparison(Operation op,
+ TNode<Number> left,
+ TNode<Number> right,
+ Label* if_true,
+ Label* if_false) {
Label do_float_comparison(this);
TVARIABLE(Float64T, var_left_float);
TVARIABLE(Float64T, var_right_float);
@@ -10527,8 +10594,9 @@ void CodeStubAssembler::BranchIfNumberRelationalComparison(
}
}
-void CodeStubAssembler::GotoIfNumberGreaterThanOrEqual(
- SloppyTNode<Number> left, SloppyTNode<Number> right, Label* if_true) {
+void CodeStubAssembler::GotoIfNumberGreaterThanOrEqual(TNode<Number> left,
+ TNode<Number> right,
+ Label* if_true) {
Label if_false(this);
BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, left,
right, if_true, &if_false);
@@ -11083,7 +11151,7 @@ void CodeStubAssembler::GenerateEqual_Same(SloppyTNode<Object> value,
// ES6 section 7.2.12 Abstract Equality Comparison
TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
SloppyTNode<Object> right,
- SloppyTNode<Context> context,
+ TNode<Context> context,
TVariable<Smi>* var_type_feedback) {
// This is a slightly optimized version of Object::Equals. Whenever you
// change something functionality wise in here, remember to update the
@@ -12024,7 +12092,7 @@ void CodeStubAssembler::BranchIfSameNumberValue(TNode<Float64T> lhs_value,
}
}
-TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<Context> context,
+TNode<Oddball> CodeStubAssembler::HasProperty(TNode<Context> context,
SloppyTNode<Object> object,
SloppyTNode<Object> key,
HasPropertyLookupMode mode) {
@@ -12106,6 +12174,80 @@ TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<Context> context,
return result.value();
}
+void CodeStubAssembler::ForInPrepare(TNode<HeapObject> enumerator,
+ TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector,
+ TNode<FixedArray>* cache_array_out,
+ TNode<Smi>* cache_length_out) {
+ // Check if we're using an enum cache.
+ TVARIABLE(FixedArray, cache_array);
+ TVARIABLE(Smi, cache_length);
+ Label if_fast(this), if_slow(this, Label::kDeferred), out(this);
+ Branch(IsMap(enumerator), &if_fast, &if_slow);
+
+ BIND(&if_fast);
+ {
+ // Load the enumeration length and cache from the {enumerator}.
+ TNode<Map> map_enumerator = CAST(enumerator);
+ TNode<WordT> enum_length = LoadMapEnumLength(map_enumerator);
+ CSA_ASSERT(this, WordNotEqual(enum_length,
+ IntPtrConstant(kInvalidEnumCacheSentinel)));
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(map_enumerator);
+ TNode<EnumCache> enum_cache = LoadObjectField<EnumCache>(
+ descriptors, DescriptorArray::kEnumCacheOffset);
+ TNode<FixedArray> enum_keys =
+ LoadObjectField<FixedArray>(enum_cache, EnumCache::kKeysOffset);
+
+ // Check if we have enum indices available.
+ TNode<FixedArray> enum_indices =
+ LoadObjectField<FixedArray>(enum_cache, EnumCache::kIndicesOffset);
+ TNode<IntPtrT> enum_indices_length =
+ LoadAndUntagFixedArrayBaseLength(enum_indices);
+ TNode<Smi> feedback = SelectSmiConstant(
+ IntPtrLessThanOrEqual(enum_length, enum_indices_length),
+ static_cast<int>(ForInFeedback::kEnumCacheKeysAndIndices),
+ static_cast<int>(ForInFeedback::kEnumCacheKeys));
+ UpdateFeedback(feedback, maybe_feedback_vector, slot);
+
+ cache_array = enum_keys;
+ cache_length = SmiTag(Signed(enum_length));
+ Goto(&out);
+ }
+
+ BIND(&if_slow);
+ {
+ // The {enumerator} is a FixedArray with all the keys to iterate.
+ TNode<FixedArray> array_enumerator = CAST(enumerator);
+
+ // Record the fact that we hit the for-in slow-path.
+ UpdateFeedback(SmiConstant(ForInFeedback::kAny), maybe_feedback_vector,
+ slot);
+
+ cache_array = array_enumerator;
+ cache_length = LoadFixedArrayBaseLength(array_enumerator);
+ Goto(&out);
+ }
+
+ BIND(&out);
+ *cache_array_out = cache_array.value();
+ *cache_length_out = cache_length.value();
+}
+
+TNode<FixedArray> CodeStubAssembler::ForInPrepareForTorque(
+ TNode<HeapObject> enumerator, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector) {
+ TNode<FixedArray> cache_array;
+ TNode<Smi> cache_length;
+ ForInPrepare(enumerator, slot, maybe_feedback_vector, &cache_array,
+ &cache_length);
+
+ TNode<FixedArray> result = AllocateUninitializedFixedArray(2);
+ StoreFixedArrayElement(result, 0, cache_array);
+ StoreFixedArrayElement(result, 1, cache_length);
+
+ return result;
+}
+
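The Torque-facing wrapper above packs the two out-values into a throwaway FixedArray; a consumer would presumably unpack it along these lines (a sketch, not code from this change; LoadFixedArrayElement is assumed to be available in the CSA context).

  TNode<FixedArray> pair =
      ForInPrepareForTorque(enumerator, slot, maybe_feedback_vector);
  TNode<FixedArray> cache_array = CAST(LoadFixedArrayElement(pair, 0));
  TNode<Smi> cache_length = CAST(LoadFixedArrayElement(pair, 1));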
TNode<String> CodeStubAssembler::Typeof(SloppyTNode<Object> value) {
TVARIABLE(String, result_var);
@@ -12194,33 +12336,15 @@ TNode<String> CodeStubAssembler::Typeof(SloppyTNode<Object> value) {
return result_var.value();
}
-TNode<Object> CodeStubAssembler::GetSuperConstructor(
- TNode<Context> context, TNode<JSFunction> active_function) {
- Label is_not_constructor(this, Label::kDeferred), out(this);
- TVARIABLE(Object, result);
-
+TNode<HeapObject> CodeStubAssembler::GetSuperConstructor(
+ TNode<JSFunction> active_function) {
TNode<Map> map = LoadMap(active_function);
- TNode<HeapObject> prototype = LoadMapPrototype(map);
- TNode<Map> prototype_map = LoadMap(prototype);
- GotoIfNot(IsConstructorMap(prototype_map), &is_not_constructor);
-
- result = prototype;
- Goto(&out);
-
- BIND(&is_not_constructor);
- {
- CallRuntime(Runtime::kThrowNotSuperConstructor, context, prototype,
- active_function);
- Unreachable();
- }
-
- BIND(&out);
- return result.value();
+ return LoadMapPrototype(map);
}
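Since GetSuperConstructor no longer throws, the constructor check from the removed branch presumably moves to the call sites; a hedged sketch of such a caller, built only from operations visible in this hunk (context and active_function are assumed to be in scope).

  TNode<HeapObject> prototype = GetSuperConstructor(active_function);
  Label is_constructor(this);
  GotoIf(IsConstructorMap(LoadMap(prototype)), &is_constructor);
  CallRuntime(Runtime::kThrowNotSuperConstructor, context, prototype,
              active_function);
  Unreachable();

  BIND(&is_constructor);
  // ... continue with {prototype} as the super constructor.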
TNode<JSReceiver> CodeStubAssembler::SpeciesConstructor(
- SloppyTNode<Context> context, SloppyTNode<Object> object,
- SloppyTNode<JSReceiver> default_constructor) {
+ TNode<Context> context, SloppyTNode<Object> object,
+ TNode<JSReceiver> default_constructor) {
Isolate* isolate = this->isolate();
TVARIABLE(JSReceiver, var_result, default_constructor);
@@ -12334,7 +12458,7 @@ TNode<Oddball> CodeStubAssembler::InstanceOf(TNode<Object> object,
return var_result.value();
}
-TNode<Number> CodeStubAssembler::NumberInc(SloppyTNode<Number> value) {
+TNode<Number> CodeStubAssembler::NumberInc(TNode<Number> value) {
TVARIABLE(Number, var_result);
TVARIABLE(Float64T, var_finc_value);
Label if_issmi(this), if_isnotsmi(this), do_finc(this), end(this);
@@ -12377,7 +12501,7 @@ TNode<Number> CodeStubAssembler::NumberInc(SloppyTNode<Number> value) {
return var_result.value();
}
-TNode<Number> CodeStubAssembler::NumberDec(SloppyTNode<Number> value) {
+TNode<Number> CodeStubAssembler::NumberDec(TNode<Number> value) {
TVARIABLE(Number, var_result);
TVARIABLE(Float64T, var_fdec_value);
Label if_issmi(this), if_isnotsmi(this), do_fdec(this), end(this);
@@ -12420,8 +12544,7 @@ TNode<Number> CodeStubAssembler::NumberDec(SloppyTNode<Number> value) {
return var_result.value();
}
-TNode<Number> CodeStubAssembler::NumberAdd(SloppyTNode<Number> a,
- SloppyTNode<Number> b) {
+TNode<Number> CodeStubAssembler::NumberAdd(TNode<Number> a, TNode<Number> b) {
TVARIABLE(Number, var_result);
Label float_add(this, Label::kDeferred), end(this);
GotoIf(TaggedIsNotSmi(a), &float_add);
@@ -12442,8 +12565,7 @@ TNode<Number> CodeStubAssembler::NumberAdd(SloppyTNode<Number> a,
return var_result.value();
}
-TNode<Number> CodeStubAssembler::NumberSub(SloppyTNode<Number> a,
- SloppyTNode<Number> b) {
+TNode<Number> CodeStubAssembler::NumberSub(TNode<Number> a, TNode<Number> b) {
TVARIABLE(Number, var_result);
Label float_sub(this, Label::kDeferred), end(this);
GotoIf(TaggedIsNotSmi(a), &float_sub);
@@ -12509,7 +12631,7 @@ TNode<Number> CodeStubAssembler::BitwiseOp(TNode<Word32T> left32,
}
TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
- SloppyTNode<Context> context, SloppyTNode<Object> value,
+ TNode<Context> context, SloppyTNode<Object> value,
SloppyTNode<Oddball> done) {
CSA_ASSERT(this, IsBoolean(done));
TNode<NativeContext> native_context = LoadNativeContext(context);
@@ -12569,7 +12691,7 @@ TNode<JSReceiver> CodeStubAssembler::ArraySpeciesCreate(TNode<Context> context,
}
void CodeStubAssembler::ThrowIfArrayBufferIsDetached(
- SloppyTNode<Context> context, TNode<JSArrayBuffer> array_buffer,
+ TNode<Context> context, TNode<JSArrayBuffer> array_buffer,
const char* method_name) {
Label if_detached(this, Label::kDeferred), if_not_detached(this);
Branch(IsDetachedBuffer(array_buffer), &if_detached, &if_not_detached);
@@ -12579,7 +12701,7 @@ void CodeStubAssembler::ThrowIfArrayBufferIsDetached(
}
void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached(
- SloppyTNode<Context> context, TNode<JSArrayBufferView> array_buffer_view,
+ TNode<Context> context, TNode<JSArrayBufferView> array_buffer_view,
const char* method_name) {
TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array_buffer_view);
ThrowIfArrayBufferIsDetached(context, buffer, method_name);
@@ -12587,7 +12709,9 @@ void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached(
TNode<RawPtrT> CodeStubAssembler::LoadJSArrayBufferBackingStorePtr(
TNode<JSArrayBuffer> array_buffer) {
- return DecodeExternalPointer(LoadJSArrayBufferBackingStore(array_buffer));
+ return LoadExternalPointerFromObject(array_buffer,
+ JSArrayBuffer::kBackingStoreOffset,
+ kArrayBufferBackingStoreTag);
}
TNode<JSArrayBuffer> CodeStubAssembler::LoadJSArrayBufferViewBuffer(
@@ -12641,50 +12765,28 @@ CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
argc_(argc),
base_(),
fp_(fp != nullptr ? fp : assembler_->LoadFramePointer()) {
-#ifdef V8_REVERSE_JSARGS
TNode<IntPtrT> offset = assembler_->IntPtrConstant(
(StandardFrameConstants::kFixedSlotCountAboveFp + 1) *
kSystemPointerSize);
-#else
- TNode<IntPtrT> offset = assembler_->ElementOffsetFromIndex(
- argc_, SYSTEM_POINTER_ELEMENTS,
- (StandardFrameConstants::kFixedSlotCountAboveFp - 1) *
- kSystemPointerSize);
-#endif
// base_ points to the first argument, not the receiver
// (whether or not a receiver is present).
base_ = assembler_->RawPtrAdd(fp_, offset);
}
TNode<Object> CodeStubArguments::GetReceiver() const {
-#ifdef V8_REVERSE_JSARGS
intptr_t offset = -kSystemPointerSize;
-#else
- intptr_t offset = kSystemPointerSize;
-#endif
return assembler_->LoadFullTagged(base_, assembler_->IntPtrConstant(offset));
}
void CodeStubArguments::SetReceiver(TNode<Object> object) const {
-#ifdef V8_REVERSE_JSARGS
intptr_t offset = -kSystemPointerSize;
-#else
- intptr_t offset = kSystemPointerSize;
-#endif
assembler_->StoreFullTaggedNoWriteBarrier(
base_, assembler_->IntPtrConstant(offset), object);
}
TNode<RawPtrT> CodeStubArguments::AtIndexPtr(TNode<IntPtrT> index) const {
-#ifdef V8_REVERSE_JSARGS
TNode<IntPtrT> offset =
assembler_->ElementOffsetFromIndex(index, SYSTEM_POINTER_ELEMENTS, 0);
-#else
- TNode<IntPtrT> negated_index =
- assembler_->IntPtrOrSmiSub(assembler_->IntPtrConstant(0), index);
- TNode<IntPtrT> offset = assembler_->ElementOffsetFromIndex(
- negated_index, SYSTEM_POINTER_ELEMENTS, 0);
-#endif
return assembler_->RawPtrAdd(base_, offset);
}
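With V8_REVERSE_JSARGS now the only layout, the offsets used above imply the following picture of the frame around base_ (an informal sketch derived from GetReceiver/SetReceiver and AtIndexPtr; slot indices are illustrative):

  //   base_ - kSystemPointerSize      -> receiver (GetReceiver/SetReceiver)
  //   base_ + 0                       -> argument 0 (AtIndexPtr(0))
  //   base_ + i * kSystemPointerSize  -> argument i (AtIndexPtr(i))
  //
  // Hence ForEach below walks the arguments with a positive
  // kSystemPointerSize stride instead of the old negated-index path.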
@@ -12730,11 +12832,7 @@ void CodeStubArguments::ForEach(
}
TNode<RawPtrT> start = AtIndexPtr(first);
TNode<RawPtrT> end = AtIndexPtr(last);
-#ifdef V8_REVERSE_JSARGS
const int increment = kSystemPointerSize;
-#else
- const int increment = -kSystemPointerSize;
-#endif
assembler_->BuildFastLoop<RawPtrT>(
vars, start, end,
[&](TNode<RawPtrT> current) {
@@ -12879,7 +12977,7 @@ TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
}
TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
- SloppyTNode<SharedFunctionInfo> shared_info, Label* if_compile_lazy) {
+ TNode<SharedFunctionInfo> shared_info, Label* if_compile_lazy) {
TNode<Object> sfi_data =
LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
@@ -13112,7 +13210,7 @@ void CodeStubAssembler::Print(const char* s) {
}
void CodeStubAssembler::Print(const char* prefix,
- SloppyTNode<MaybeObject> tagged_value) {
+ TNode<MaybeObject> tagged_value) {
if (prefix != nullptr) {
std::string formatted(prefix);
formatted += ": ";
@@ -13166,13 +13264,8 @@ TNode<Object> CodeStubAssembler::CallRuntimeNewArray(
// Runtime_NewArray receives arguments in the JS order (to avoid an
// unnecessary copy), except the last two (new_target and allocation_site),
// which are added on top of the stack later.
-#ifdef V8_REVERSE_JSARGS
return CallRuntime(Runtime::kNewArray, context, length, receiver, new_target,
allocation_site);
-#else
- return CallRuntime(Runtime::kNewArray, context, receiver, length, new_target,
- allocation_site);
-#endif
}
void CodeStubAssembler::TailCallRuntimeNewArray(TNode<Context> context,
@@ -13183,13 +13276,8 @@ void CodeStubAssembler::TailCallRuntimeNewArray(TNode<Context> context,
// Runtime_NewArray receives arguments in the JS order (to avoid an
// unnecessary copy), except the last two (new_target and allocation_site),
// which are added on top of the stack later.
-#ifdef V8_REVERSE_JSARGS
return TailCallRuntime(Runtime::kNewArray, context, length, receiver,
new_target, allocation_site);
-#else
- return TailCallRuntime(Runtime::kNewArray, context, receiver, length,
- new_target, allocation_site);
-#endif
}
TNode<JSArray> CodeStubAssembler::ArrayCreate(TNode<Context> context,
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 8306b7e466..89e9556b9e 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -15,6 +15,7 @@
#include "src/compiler/code-assembler.h"
#include "src/objects/arguments.h"
#include "src/objects/bigint.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/js-function.h"
#include "src/objects/objects.h"
#include "src/objects/promise.h"
@@ -247,15 +248,16 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#define CSA_ASSERT_BRANCH(csa, gen, ...) \
(csa)->Assert(gen, #gen, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__))
-#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
- (csa)->Assert( \
- [&]() -> TNode<BoolT> { \
- const TNode<Word32T> argc = UncheckedCast<Word32T>( \
- (csa)->Parameter(Descriptor::kJSActualArgumentsCount)); \
- return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
- }, \
- "argc " #op " " #expected, __FILE__, __LINE__, \
- {{SmiFromInt32((csa)->Parameter(Descriptor::kJSActualArgumentsCount)), \
+#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
+ (csa)->Assert( \
+ [&]() -> TNode<BoolT> { \
+ const TNode<Word32T> argc = (csa)->UncheckedParameter<Word32T>( \
+ Descriptor::kJSActualArgumentsCount); \
+ return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
+ }, \
+ "argc " #op " " #expected, __FILE__, __LINE__, \
+ {{SmiFromInt32((csa)->UncheckedParameter<Int32T>( \
+ Descriptor::kJSActualArgumentsCount)), \
"argc"}})
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
@@ -554,9 +556,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Float64T> Float64RoundToEven(SloppyTNode<Float64T> x);
TNode<Float64T> Float64Trunc(SloppyTNode<Float64T> x);
// Select the maximum of the two provided Number values.
- TNode<Number> NumberMax(SloppyTNode<Number> left, SloppyTNode<Number> right);
+ TNode<Number> NumberMax(TNode<Number> left, TNode<Number> right);
// Select the minimum of the two provided Number values.
- TNode<Number> NumberMin(SloppyTNode<Number> left, SloppyTNode<Number> right);
+ TNode<Number> NumberMin(TNode<Number> left, TNode<Number> right);
// Returns true iff the given value fits into smi range and is >= 0.
TNode<BoolT> IsValidPositiveSmi(TNode<IntPtrT> value);
@@ -718,10 +720,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
#undef BINT_COMPARISON_OP
// Smi | HeapNumber operations.
- TNode<Number> NumberInc(SloppyTNode<Number> value);
- TNode<Number> NumberDec(SloppyTNode<Number> value);
- TNode<Number> NumberAdd(SloppyTNode<Number> a, SloppyTNode<Number> b);
- TNode<Number> NumberSub(SloppyTNode<Number> a, SloppyTNode<Number> b);
+ TNode<Number> NumberInc(TNode<Number> value);
+ TNode<Number> NumberDec(TNode<Number> value);
+ TNode<Number> NumberAdd(TNode<Number> a, TNode<Number> b);
+ TNode<Number> NumberSub(TNode<Number> a, TNode<Number> b);
void GotoIfNotNumber(TNode<Object> value, Label* is_not_number);
void GotoIfNumber(TNode<Object> value, Label* is_number);
TNode<Number> SmiToNumber(TNode<Smi> v) { return v; }
@@ -754,7 +756,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void Assert(const NodeGenerator<BoolT>& condition_body, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
- void Assert(SloppyTNode<Word32T> condition_node, const char* message,
+ void Assert(TNode<Word32T> condition_node, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
void Check(const BranchGenerator& branch, const char* message,
@@ -763,7 +765,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void Check(const NodeGenerator<BoolT>& condition_body, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
- void Check(SloppyTNode<Word32T> condition_node, const char* message,
+ void Check(TNode<Word32T> condition_node, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
void FailAssert(const char* message,
@@ -857,22 +859,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
condition, [=] { return true_value; }, [=] { return false_value; });
}
- TNode<Int32T> SelectInt32Constant(SloppyTNode<BoolT> condition,
- int true_value, int false_value);
- TNode<IntPtrT> SelectIntPtrConstant(SloppyTNode<BoolT> condition,
- int true_value, int false_value);
- TNode<Oddball> SelectBooleanConstant(SloppyTNode<BoolT> condition);
- TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi true_value,
+ TNode<Int32T> SelectInt32Constant(TNode<BoolT> condition, int true_value,
+ int false_value);
+ TNode<IntPtrT> SelectIntPtrConstant(TNode<BoolT> condition, int true_value,
+ int false_value);
+ TNode<Oddball> SelectBooleanConstant(TNode<BoolT> condition);
+ TNode<Smi> SelectSmiConstant(TNode<BoolT> condition, Smi true_value,
Smi false_value);
- TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, int true_value,
+ TNode<Smi> SelectSmiConstant(TNode<BoolT> condition, int true_value,
Smi false_value) {
return SelectSmiConstant(condition, Smi::FromInt(true_value), false_value);
}
- TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi true_value,
+ TNode<Smi> SelectSmiConstant(TNode<BoolT> condition, Smi true_value,
int false_value) {
return SelectSmiConstant(condition, true_value, Smi::FromInt(false_value));
}
- TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, int true_value,
+ TNode<Smi> SelectSmiConstant(TNode<BoolT> condition, int true_value,
int false_value) {
return SelectSmiConstant(condition, Smi::FromInt(true_value),
Smi::FromInt(false_value));
@@ -889,8 +891,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> TruncateIntPtrToInt32(SloppyTNode<IntPtrT> value);
// Check a value for smi-ness
- TNode<BoolT> TaggedIsSmi(SloppyTNode<MaybeObject> a);
- TNode<BoolT> TaggedIsNotSmi(SloppyTNode<MaybeObject> a);
+ TNode<BoolT> TaggedIsSmi(TNode<MaybeObject> a);
+ TNode<BoolT> TaggedIsNotSmi(TNode<MaybeObject> a);
// Check that the value is a non-negative smi.
TNode<BoolT> TaggedIsPositiveSmi(SloppyTNode<Object> a);
@@ -967,29 +969,87 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise.
void GotoIfForceSlowPath(Label* if_true);
- // Convert external pointer from on-V8-heap representation to an actual
- // external pointer value.
- TNode<RawPtrT> DecodeExternalPointer(
- TNode<ExternalPointerT> encoded_pointer) {
- STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
- TNode<RawPtrT> value = ReinterpretCast<RawPtrT>(encoded_pointer);
- if (V8_HEAP_SANDBOX_BOOL) {
- value = UncheckedCast<RawPtrT>(
- WordXor(value, UintPtrConstant(kExternalPointerSalt)));
- }
- return value;
+ //
+ // ExternalPointerT-related functionality.
+ //
+
+ TNode<ExternalPointerT> ChangeUint32ToExternalPointer(TNode<Uint32T> value);
+ TNode<Uint32T> ChangeExternalPointerToUint32(TNode<ExternalPointerT> value);
+
+ // Initialize an external pointer field in an object.
+ void InitializeExternalPointerField(TNode<HeapObject> object, int offset) {
+ InitializeExternalPointerField(object, IntPtrConstant(offset));
}
+ void InitializeExternalPointerField(TNode<HeapObject> object,
+ TNode<IntPtrT> offset);
- // Convert external pointer value to on-V8-heap representation.
- // This should eventually become a call to a non-allocating runtime function.
- TNode<ExternalPointerT> EncodeExternalPointer(TNode<RawPtrT> pointer) {
- STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
- TNode<RawPtrT> encoded_pointer = pointer;
- if (V8_HEAP_SANDBOX_BOOL) {
- encoded_pointer = UncheckedCast<RawPtrT>(
- WordXor(encoded_pointer, UintPtrConstant(kExternalPointerSalt)));
- }
- return ReinterpretCast<ExternalPointerT>(encoded_pointer);
+ // Initialize an external pointer field in an object with a given value.
+ void InitializeExternalPointerField(TNode<HeapObject> object, int offset,
+ TNode<RawPtrT> pointer,
+ ExternalPointerTag tag) {
+ InitializeExternalPointerField(object, IntPtrConstant(offset), pointer,
+ tag);
+ }
+
+ void InitializeExternalPointerField(TNode<HeapObject> object,
+ TNode<IntPtrT> offset,
+ TNode<RawPtrT> pointer,
+ ExternalPointerTag tag) {
+ InitializeExternalPointerField(object, offset);
+ StoreExternalPointerToObject(object, offset, pointer, tag);
+ }
+
+ // Load an external pointer value from an object.
+ TNode<RawPtrT> LoadExternalPointerFromObject(TNode<HeapObject> object,
+ int offset,
+ ExternalPointerTag tag) {
+ return LoadExternalPointerFromObject(object, IntPtrConstant(offset), tag);
+ }
+
+ TNode<RawPtrT> LoadExternalPointerFromObject(TNode<HeapObject> object,
+ TNode<IntPtrT> offset,
+ ExternalPointerTag tag);
+
+ // Store an external pointer value to an object.
+ void StoreExternalPointerToObject(TNode<HeapObject> object, int offset,
+ TNode<RawPtrT> pointer,
+ ExternalPointerTag tag) {
+ StoreExternalPointerToObject(object, IntPtrConstant(offset), pointer, tag);
+ }
+
+ void StoreExternalPointerToObject(TNode<HeapObject> object,
+ TNode<IntPtrT> offset,
+ TNode<RawPtrT> pointer,
+ ExternalPointerTag tag);
+
+ TNode<RawPtrT> LoadForeignForeignAddressPtr(TNode<Foreign> object) {
+ return LoadExternalPointerFromObject(object, Foreign::kForeignAddressOffset,
+ kForeignForeignAddressTag);
+ }
+
+ TNode<RawPtrT> LoadExternalStringResourcePtr(TNode<ExternalString> object) {
+ return LoadExternalPointerFromObject(
+ object, ExternalString::kResourceOffset, kExternalStringResourceTag);
+ }
+
+ TNode<RawPtrT> LoadExternalStringResourceDataPtr(
+ TNode<ExternalString> object) {
+ return LoadExternalPointerFromObject(object,
+ ExternalString::kResourceDataOffset,
+ kExternalStringResourceDataTag);
+ }
+
+ TNode<RawPtrT> LoadJSTypedArrayExternalPointerPtr(
+ TNode<JSTypedArray> holder) {
+ return LoadExternalPointerFromObject(holder,
+ JSTypedArray::kExternalPointerOffset,
+ kTypedArrayExternalPointerTag);
+ }
+
+ void StoreJSTypedArrayExternalPointerPtr(TNode<JSTypedArray> holder,
+ TNode<RawPtrT> value) {
+ StoreExternalPointerToObject(holder, JSTypedArray::kExternalPointerOffset,
+ value, kTypedArrayExternalPointerTag);
}
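A hedged usage sketch of the tagged external-pointer API declared above; kMyFieldOffset and kMyExternalPointerTag are purely illustrative names, not real V8 identifiers, and the typed accessors such as LoadExternalStringResourceDataPtr simply bind a concrete offset/tag pair in the same way.

  // During object setup: reserve the slot, then write the tagged pointer.
  InitializeExternalPointerField(object, kMyFieldOffset, raw_pointer,
                                 kMyExternalPointerTag);
  // Later reads must pass the same tag to recover the raw pointer.
  TNode<RawPtrT> reloaded =
      LoadExternalPointerFromObject(object, kMyFieldOffset,
                                    kMyExternalPointerTag);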
// Load value from current parent frame by given offset in bytes.
@@ -1049,13 +1109,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
IntPtrSub(offset, IntPtrConstant(kHeapObjectTag))));
}
// Load a SMI field and untag it.
- TNode<IntPtrT> LoadAndUntagObjectField(SloppyTNode<HeapObject> object,
- int offset);
+ TNode<IntPtrT> LoadAndUntagObjectField(TNode<HeapObject> object, int offset);
// Load a SMI field, untag it, and convert to Word32.
- TNode<Int32T> LoadAndUntagToWord32ObjectField(SloppyTNode<HeapObject> object,
+ TNode<Int32T> LoadAndUntagToWord32ObjectField(TNode<HeapObject> object,
int offset);
- TNode<MaybeObject> LoadMaybeWeakObjectField(SloppyTNode<HeapObject> object,
+ TNode<MaybeObject> LoadMaybeWeakObjectField(TNode<HeapObject> object,
int offset) {
return UncheckedCast<MaybeObject>(LoadObjectField(object, offset));
}
@@ -1124,74 +1183,71 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
// Load the floating point value of a HeapNumber.
- TNode<Float64T> LoadHeapNumberValue(SloppyTNode<HeapObject> object);
+ TNode<Float64T> LoadHeapNumberValue(TNode<HeapObject> object);
// Load the Map of a HeapObject.
- TNode<Map> LoadMap(SloppyTNode<HeapObject> object);
+ TNode<Map> LoadMap(TNode<HeapObject> object);
// Load the instance type of a HeapObject.
- TNode<Uint16T> LoadInstanceType(SloppyTNode<HeapObject> object);
+ TNode<Uint16T> LoadInstanceType(TNode<HeapObject> object);
// Compare the instance type of the object against the provided one.
- TNode<BoolT> HasInstanceType(SloppyTNode<HeapObject> object,
- InstanceType type);
- TNode<BoolT> DoesntHaveInstanceType(SloppyTNode<HeapObject> object,
+ TNode<BoolT> HasInstanceType(TNode<HeapObject> object, InstanceType type);
+ TNode<BoolT> DoesntHaveInstanceType(TNode<HeapObject> object,
InstanceType type);
- TNode<BoolT> TaggedDoesntHaveInstanceType(SloppyTNode<HeapObject> any_tagged,
+ TNode<BoolT> TaggedDoesntHaveInstanceType(TNode<HeapObject> any_tagged,
InstanceType type);
TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map);
void GotoIfMapHasSlowProperties(TNode<Map> map, Label* if_slow);
// Load the properties backing store of a JSReceiver.
- TNode<HeapObject> LoadSlowProperties(SloppyTNode<JSReceiver> object);
- TNode<HeapObject> LoadFastProperties(SloppyTNode<JSReceiver> object);
+ TNode<HeapObject> LoadSlowProperties(TNode<JSReceiver> object);
+ TNode<HeapObject> LoadFastProperties(TNode<JSReceiver> object);
// Load the elements backing store of a JSObject.
- TNode<FixedArrayBase> LoadElements(SloppyTNode<JSObject> object) {
+ TNode<FixedArrayBase> LoadElements(TNode<JSObject> object) {
return LoadJSObjectElements(object);
}
// Load the length of a JSArray instance.
TNode<Object> LoadJSArgumentsObjectLength(TNode<Context> context,
TNode<JSArgumentsObject> array);
// Load the length of a fast JSArray instance. Returns a positive Smi.
- TNode<Smi> LoadFastJSArrayLength(SloppyTNode<JSArray> array);
+ TNode<Smi> LoadFastJSArrayLength(TNode<JSArray> array);
// Load the length of a fixed array base instance.
- TNode<Smi> LoadFixedArrayBaseLength(SloppyTNode<FixedArrayBase> array);
+ TNode<Smi> LoadFixedArrayBaseLength(TNode<FixedArrayBase> array);
// Load and untag the length of a fixed array base instance.
- TNode<IntPtrT> LoadAndUntagFixedArrayBaseLength(
- SloppyTNode<FixedArrayBase> array);
+ TNode<IntPtrT> LoadAndUntagFixedArrayBaseLength(TNode<FixedArrayBase> array);
// Load the length of a WeakFixedArray.
TNode<Smi> LoadWeakFixedArrayLength(TNode<WeakFixedArray> array);
- TNode<IntPtrT> LoadAndUntagWeakFixedArrayLength(
- SloppyTNode<WeakFixedArray> array);
+ TNode<IntPtrT> LoadAndUntagWeakFixedArrayLength(TNode<WeakFixedArray> array);
// Load the number of descriptors in DescriptorArray.
TNode<Int32T> LoadNumberOfDescriptors(TNode<DescriptorArray> array);
// Load the number of own descriptors of a map.
TNode<Int32T> LoadNumberOfOwnDescriptors(TNode<Map> map);
// Load the bit field of a Map.
- TNode<Int32T> LoadMapBitField(SloppyTNode<Map> map);
+ TNode<Int32T> LoadMapBitField(TNode<Map> map);
// Load bit field 2 of a map.
- TNode<Int32T> LoadMapBitField2(SloppyTNode<Map> map);
+ TNode<Int32T> LoadMapBitField2(TNode<Map> map);
// Load bit field 3 of a map.
- TNode<Uint32T> LoadMapBitField3(SloppyTNode<Map> map);
+ TNode<Uint32T> LoadMapBitField3(TNode<Map> map);
// Load the instance type of a map.
- TNode<Uint16T> LoadMapInstanceType(SloppyTNode<Map> map);
+ TNode<Uint16T> LoadMapInstanceType(TNode<Map> map);
// Load the ElementsKind of a map.
- TNode<Int32T> LoadMapElementsKind(SloppyTNode<Map> map);
- TNode<Int32T> LoadElementsKind(SloppyTNode<HeapObject> object);
+ TNode<Int32T> LoadMapElementsKind(TNode<Map> map);
+ TNode<Int32T> LoadElementsKind(TNode<HeapObject> object);
// Load the instance descriptors of a map.
- TNode<DescriptorArray> LoadMapDescriptors(SloppyTNode<Map> map);
+ TNode<DescriptorArray> LoadMapDescriptors(TNode<Map> map);
// Load the prototype of a map.
- TNode<HeapObject> LoadMapPrototype(SloppyTNode<Map> map);
+ TNode<HeapObject> LoadMapPrototype(TNode<Map> map);
// Load the instance size of a Map.
- TNode<IntPtrT> LoadMapInstanceSizeInWords(SloppyTNode<Map> map);
+ TNode<IntPtrT> LoadMapInstanceSizeInWords(TNode<Map> map);
// Load the inobject properties start of a Map (valid only for JSObjects).
- TNode<IntPtrT> LoadMapInobjectPropertiesStartInWords(SloppyTNode<Map> map);
+ TNode<IntPtrT> LoadMapInobjectPropertiesStartInWords(TNode<Map> map);
// Load the constructor function index of a Map (only for primitive maps).
- TNode<IntPtrT> LoadMapConstructorFunctionIndex(SloppyTNode<Map> map);
+ TNode<IntPtrT> LoadMapConstructorFunctionIndex(TNode<Map> map);
// Load the constructor of a Map (equivalent to Map::GetConstructor()).
- TNode<Object> LoadMapConstructor(SloppyTNode<Map> map);
+ TNode<Object> LoadMapConstructor(TNode<Map> map);
// Load the EnumLength of a Map.
- TNode<WordT> LoadMapEnumLength(SloppyTNode<Map> map);
+ TNode<WordT> LoadMapEnumLength(TNode<Map> map);
// Load the back-pointer of a Map.
- TNode<Object> LoadMapBackPointer(SloppyTNode<Map> map);
+ TNode<Object> LoadMapBackPointer(TNode<Map> map);
// Checks that |map| has only simple properties, returns bitfield3.
TNode<Uint32T> EnsureOnlyHasSimpleProperties(TNode<Map> map,
TNode<Int32T> instance_type,
@@ -1206,7 +1262,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> length);
// Check if the map is set for slow properties.
- TNode<BoolT> IsDictionaryMap(SloppyTNode<Map> map);
+ TNode<BoolT> IsDictionaryMap(TNode<Map> map);
// Load the Name::hash() value of a name as a uint32 value.
// If {if_hash_not_computed} label is specified then it also checks if
@@ -1218,9 +1274,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Load length field of a String object as Smi value.
TNode<Smi> LoadStringLengthAsSmi(TNode<String> string);
// Load length field of a String object as intptr_t value.
- TNode<IntPtrT> LoadStringLengthAsWord(SloppyTNode<String> string);
+ TNode<IntPtrT> LoadStringLengthAsWord(TNode<String> string);
// Load length field of a String object as uint32_t value.
- TNode<Uint32T> LoadStringLengthAsWord32(SloppyTNode<String> string);
+ TNode<Uint32T> LoadStringLengthAsWord32(TNode<String> string);
// Load value field of a JSPrimitiveWrapper object.
TNode<Object> LoadJSPrimitiveWrapperValue(TNode<JSPrimitiveWrapper> object);
@@ -1399,12 +1455,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> LoadScopeInfoHasExtensionField(TNode<ScopeInfo> scope_info);
// Context manipulation:
- void StoreContextElementNoWriteBarrier(SloppyTNode<Context> context,
- int slot_index,
+ void StoreContextElementNoWriteBarrier(TNode<Context> context, int slot_index,
SloppyTNode<Object> value);
- TNode<NativeContext> LoadNativeContext(SloppyTNode<Context> context);
+ TNode<NativeContext> LoadNativeContext(TNode<Context> context);
// Calling this is only valid if there's a module context in the chain.
- TNode<Context> LoadModuleContext(SloppyTNode<Context> context);
+ TNode<Context> LoadModuleContext(TNode<Context> context);
void GotoIfContextElementEqual(SloppyTNode<Object> value,
TNode<NativeContext> native_context,
@@ -1419,9 +1474,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<NativeContext> native_context);
TNode<Map> LoadJSArrayElementsMap(ElementsKind kind,
- SloppyTNode<NativeContext> native_context);
+ TNode<NativeContext> native_context);
TNode<Map> LoadJSArrayElementsMap(SloppyTNode<Int32T> kind,
- SloppyTNode<NativeContext> native_context);
+ TNode<NativeContext> native_context);
TNode<BoolT> IsJSFunctionWithPrototypeSlot(TNode<HeapObject> object);
TNode<BoolT> IsGeneratorFunction(TNode<JSFunction> function);
@@ -1435,7 +1490,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* if_bailout);
TNode<BytecodeArray> LoadSharedFunctionInfoBytecodeArray(
- SloppyTNode<SharedFunctionInfo> shared);
+ TNode<SharedFunctionInfo> shared);
void StoreObjectByteNoWriteBarrier(TNode<HeapObject> object, int offset,
TNode<Word32T> value);
@@ -1699,10 +1754,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectBodyWithSlackTracking(
- SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ TNode<HeapObject> object, TNode<Map> map,
SloppyTNode<IntPtrT> instance_size);
void InitializeJSObjectBodyNoSlackTracking(
- SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ TNode<HeapObject> object, TNode<Map> map,
SloppyTNode<IntPtrT> instance_size,
int start_offset = JSObject::kHeaderSize);
@@ -1823,7 +1878,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<PropertyArray> AllocatePropertyArray(TNode<IntPtrT> capacity);
// TODO(v8:9722): Return type should be JSIteratorResult
- TNode<JSObject> AllocateJSIteratorResult(SloppyTNode<Context> context,
+ TNode<JSObject> AllocateJSIteratorResult(TNode<Context> context,
SloppyTNode<Object> value,
SloppyTNode<Oddball> done);
@@ -2040,10 +2095,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// compatible only with HOLEY_ELEMENTS and PACKED_ELEMENTS.
template <typename TIndex>
TNode<FixedArray> ExtractToFixedArray(
- SloppyTNode<FixedArrayBase> source, TNode<TIndex> first,
- TNode<TIndex> count, TNode<TIndex> capacity, TNode<Map> source_map,
- ElementsKind from_kind, AllocationFlags allocation_flags,
- ExtractFixedArrayFlags extract_flags, HoleConversionMode convert_holes,
+ TNode<FixedArrayBase> source, TNode<TIndex> first, TNode<TIndex> count,
+ TNode<TIndex> capacity, TNode<Map> source_map, ElementsKind from_kind,
+ AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags,
+ HoleConversionMode convert_holes,
TVariable<BoolT>* var_holes_converted = nullptr,
base::Optional<TNode<Int32T>> source_runtime_kind = base::nullopt);
@@ -2138,9 +2193,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Float64T> TryTaggedToFloat64(TNode<Object> value,
Label* if_valueisnotnumber);
- TNode<Float64T> TruncateTaggedToFloat64(SloppyTNode<Context> context,
+ TNode<Float64T> TruncateTaggedToFloat64(TNode<Context> context,
SloppyTNode<Object> value);
- TNode<Word32T> TruncateTaggedToWord32(SloppyTNode<Context> context,
+ TNode<Word32T> TruncateTaggedToWord32(TNode<Context> context,
SloppyTNode<Object> value);
void TaggedToWord32OrBigInt(TNode<Context> context, TNode<Object> value,
Label* if_number, TVariable<Word32T>* var_word32,
@@ -2254,87 +2309,87 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsNoElementsProtectorCellInvalid();
TNode<BoolT> IsArrayIteratorProtectorCellInvalid();
TNode<BoolT> IsBigIntInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsBigInt(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsBoolean(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsCallableMap(SloppyTNode<Map> map);
- TNode<BoolT> IsCallable(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsBigInt(TNode<HeapObject> object);
+ TNode<BoolT> IsBoolean(TNode<HeapObject> object);
+ TNode<BoolT> IsCallableMap(TNode<Map> map);
+ TNode<BoolT> IsCallable(TNode<HeapObject> object);
TNode<BoolT> TaggedIsCallable(TNode<Object> object);
TNode<BoolT> IsConsStringInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsConstructorMap(SloppyTNode<Map> map);
- TNode<BoolT> IsConstructor(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsDeprecatedMap(SloppyTNode<Map> map);
- TNode<BoolT> IsNameDictionary(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsGlobalDictionary(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsExtensibleMap(SloppyTNode<Map> map);
+ TNode<BoolT> IsConstructorMap(TNode<Map> map);
+ TNode<BoolT> IsConstructor(TNode<HeapObject> object);
+ TNode<BoolT> IsDeprecatedMap(TNode<Map> map);
+ TNode<BoolT> IsNameDictionary(TNode<HeapObject> object);
+ TNode<BoolT> IsGlobalDictionary(TNode<HeapObject> object);
+ TNode<BoolT> IsExtensibleMap(TNode<Map> map);
TNode<BoolT> IsExtensibleNonPrototypeMap(TNode<Map> map);
TNode<BoolT> IsExternalStringInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsFixedArray(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsFixedArraySubclass(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsFixedArrayWithKind(SloppyTNode<HeapObject> object,
+ TNode<BoolT> IsFixedArray(TNode<HeapObject> object);
+ TNode<BoolT> IsFixedArraySubclass(TNode<HeapObject> object);
+ TNode<BoolT> IsFixedArrayWithKind(TNode<HeapObject> object,
ElementsKind kind);
- TNode<BoolT> IsFixedArrayWithKindOrEmpty(SloppyTNode<FixedArrayBase> object,
+ TNode<BoolT> IsFixedArrayWithKindOrEmpty(TNode<FixedArrayBase> object,
ElementsKind kind);
- TNode<BoolT> IsFunctionWithPrototypeSlotMap(SloppyTNode<Map> map);
- TNode<BoolT> IsHashTable(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsEphemeronHashTable(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsFunctionWithPrototypeSlotMap(TNode<Map> map);
+ TNode<BoolT> IsHashTable(TNode<HeapObject> object);
+ TNode<BoolT> IsEphemeronHashTable(TNode<HeapObject> object);
TNode<BoolT> IsHeapNumberInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsOddball(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsOddball(TNode<HeapObject> object);
TNode<BoolT> IsOddballInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsIndirectStringInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSArrayBuffer(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSArrayBuffer(TNode<HeapObject> object);
TNode<BoolT> IsJSDataView(TNode<HeapObject> object);
TNode<BoolT> IsJSArrayInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSArrayMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSArray(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSArrayIterator(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSAsyncGeneratorObject(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSArrayMap(TNode<Map> map);
+ TNode<BoolT> IsJSArray(TNode<HeapObject> object);
+ TNode<BoolT> IsJSArrayIterator(TNode<HeapObject> object);
+ TNode<BoolT> IsJSAsyncGeneratorObject(TNode<HeapObject> object);
TNode<BoolT> IsJSFunctionInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSFunctionMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSFunction(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSBoundFunction(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSFunctionMap(TNode<Map> map);
+ TNode<BoolT> IsJSFunction(TNode<HeapObject> object);
+ TNode<BoolT> IsJSBoundFunction(TNode<HeapObject> object);
TNode<BoolT> IsJSGeneratorObject(TNode<HeapObject> object);
TNode<BoolT> IsJSGlobalProxyInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSGlobalProxyMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSGlobalProxy(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSGlobalProxyMap(TNode<Map> map);
+ TNode<BoolT> IsJSGlobalProxy(TNode<HeapObject> object);
TNode<BoolT> IsJSObjectInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSObjectMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSObject(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSObjectMap(TNode<Map> map);
+ TNode<BoolT> IsJSObject(TNode<HeapObject> object);
TNode<BoolT> IsJSFinalizationRegistryMap(TNode<Map> map);
TNode<BoolT> IsJSFinalizationRegistry(TNode<HeapObject> object);
- TNode<BoolT> IsJSPromiseMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSPromise(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSProxy(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSStringIterator(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSRegExpStringIterator(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSPromiseMap(TNode<Map> map);
+ TNode<BoolT> IsJSPromise(TNode<HeapObject> object);
+ TNode<BoolT> IsJSProxy(TNode<HeapObject> object);
+ TNode<BoolT> IsJSStringIterator(TNode<HeapObject> object);
+ TNode<BoolT> IsJSRegExpStringIterator(TNode<HeapObject> object);
TNode<BoolT> IsJSReceiverInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSReceiverMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSReceiver(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSRegExp(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSReceiverMap(TNode<Map> map);
+ TNode<BoolT> IsJSReceiver(TNode<HeapObject> object);
+ TNode<BoolT> IsJSRegExp(TNode<HeapObject> object);
TNode<BoolT> IsJSTypedArrayInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSTypedArrayMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSTypedArray(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSTypedArrayMap(TNode<Map> map);
+ TNode<BoolT> IsJSTypedArray(TNode<HeapObject> object);
TNode<BoolT> IsJSGeneratorMap(TNode<Map> map);
TNode<BoolT> IsJSPrimitiveWrapperInstanceType(
SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSPrimitiveWrapperMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSPrimitiveWrapper(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsMap(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsName(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSPrimitiveWrapperMap(TNode<Map> map);
+ TNode<BoolT> IsJSPrimitiveWrapper(TNode<HeapObject> object);
+ TNode<BoolT> IsMap(TNode<HeapObject> object);
+ TNode<BoolT> IsName(TNode<HeapObject> object);
TNode<BoolT> IsNameInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsNullOrJSReceiver(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsNullOrJSReceiver(TNode<HeapObject> object);
TNode<BoolT> IsNullOrUndefined(SloppyTNode<Object> object);
- TNode<BoolT> IsNumberDictionary(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsNumberDictionary(TNode<HeapObject> object);
TNode<BoolT> IsOneByteStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsSeqOneByteStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsPrimitiveInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsPrivateName(SloppyTNode<Symbol> symbol);
- TNode<BoolT> IsPropertyArray(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsPropertyCell(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsPropertyArray(TNode<HeapObject> object);
+ TNode<BoolT> IsPropertyCell(TNode<HeapObject> object);
TNode<BoolT> IsPromiseReactionJobTask(TNode<HeapObject> object);
- TNode<BoolT> IsPrototypeInitialArrayPrototype(SloppyTNode<Context> context,
- SloppyTNode<Map> map);
- TNode<BoolT> IsPrototypeTypedArrayPrototype(SloppyTNode<Context> context,
- SloppyTNode<Map> map);
+ TNode<BoolT> IsPrototypeInitialArrayPrototype(TNode<Context> context,
+ TNode<Map> map);
+ TNode<BoolT> IsPrototypeTypedArrayPrototype(TNode<Context> context,
+ TNode<Map> map);
TNode<BoolT> IsFastAliasedArgumentsMap(TNode<Context> context,
TNode<Map> map);
@@ -2350,9 +2405,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsSpecialReceiverInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsCustomElementsReceiverInstanceType(
TNode<Int32T> instance_type);
- TNode<BoolT> IsSpecialReceiverMap(SloppyTNode<Map> map);
+ TNode<BoolT> IsSpecialReceiverMap(TNode<Map> map);
TNode<BoolT> IsStringInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsString(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsString(TNode<HeapObject> object);
TNode<BoolT> IsSeqOneByteString(TNode<HeapObject> object);
TNode<BoolT> IsSymbolInstanceType(SloppyTNode<Int32T> instance_type);
@@ -2360,8 +2415,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsUniqueName(TNode<HeapObject> object);
TNode<BoolT> IsUniqueNameNoIndex(TNode<HeapObject> object);
TNode<BoolT> IsUniqueNameNoCachedIndex(TNode<HeapObject> object);
- TNode<BoolT> IsUndetectableMap(SloppyTNode<Map> map);
- TNode<BoolT> IsNotWeakFixedArraySubclass(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsUndetectableMap(TNode<Map> map);
+ TNode<BoolT> IsNotWeakFixedArraySubclass(TNode<HeapObject> object);
TNode<BoolT> IsZeroOrContext(SloppyTNode<Object> object);
TNode<BoolT> IsPromiseResolveProtectorCellInvalid();
@@ -2385,8 +2440,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// True iff |number| is either a Smi, or a HeapNumber whose value is not
// within Smi range.
- TNode<BoolT> IsNumberNormalized(SloppyTNode<Number> number);
- TNode<BoolT> IsNumberPositive(SloppyTNode<Number> number);
+ TNode<BoolT> IsNumberNormalized(TNode<Number> number);
+ TNode<BoolT> IsNumberPositive(TNode<Number> number);
TNode<BoolT> IsHeapNumberPositive(TNode<HeapNumber> number);
// True iff {number} is non-negative and less than or equal to 2**53-1.
@@ -2463,11 +2518,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Convert a Non-Number object to a Number.
TNode<Number> NonNumberToNumber(
- TNode<Context> context, SloppyTNode<HeapObject> input,
+ TNode<Context> context, TNode<HeapObject> input,
BigIntHandling bigint_handling = BigIntHandling::kThrow);
// Convert a Non-Number object to a Numeric.
TNode<Numeric> NonNumberToNumeric(TNode<Context> context,
- SloppyTNode<HeapObject> input);
+ TNode<HeapObject> input);
// Convert any object to a Number.
// Conforms to ES#sec-tonumber if {bigint_handling} == kThrow.
// With {bigint_handling} == kConvertToNumber, matches behavior of
@@ -2475,7 +2530,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Number> ToNumber(
TNode<Context> context, SloppyTNode<Object> input,
BigIntHandling bigint_handling = BigIntHandling::kThrow);
- TNode<Number> ToNumber_Inline(SloppyTNode<Context> context,
+ TNode<Number> ToNumber_Inline(TNode<Context> context,
SloppyTNode<Object> input);
// Convert any plain primitive to a Number. No need to handle BigInts since
// they are not plain primitives.
@@ -2488,15 +2543,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Converts |input| to one of 2^32 integer values in the range 0 through
// 2^32-1, inclusive.
// ES#sec-touint32
- TNode<Number> ToUint32(SloppyTNode<Context> context,
- SloppyTNode<Object> input);
+ TNode<Number> ToUint32(TNode<Context> context, SloppyTNode<Object> input);
// Convert any object to a String.
- TNode<String> ToString_Inline(SloppyTNode<Context> context,
+ TNode<String> ToString_Inline(TNode<Context> context,
SloppyTNode<Object> input);
- TNode<JSReceiver> ToObject(SloppyTNode<Context> context,
- SloppyTNode<Object> input);
+ TNode<JSReceiver> ToObject(TNode<Context> context, SloppyTNode<Object> input);
// Same as ToObject but avoids the Builtin call if |input| is already a
// JSReceiver.
@@ -2504,7 +2557,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> input);
// ES6 7.1.15 ToLength, but with inlined fast path.
- TNode<Number> ToLength_Inline(SloppyTNode<Context> context,
+ TNode<Number> ToLength_Inline(TNode<Context> context,
SloppyTNode<Object> input);
TNode<Object> OrdinaryToPrimitive(TNode<Context> context, TNode<Object> input,
@@ -2513,7 +2566,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word32|. Returns result as an uint32 node.
template <typename BitField>
- TNode<Uint32T> DecodeWord32(SloppyTNode<Word32T> word32) {
+ TNode<Uint32T> DecodeWord32(TNode<Word32T> word32) {
return DecodeWord32(word32, BitField::kShift, BitField::kMask);
}
@@ -2527,7 +2580,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word32|. Returns result as a word-size node.
template <typename BitField>
- TNode<UintPtrT> DecodeWordFromWord32(SloppyTNode<Word32T> word32) {
+ TNode<UintPtrT> DecodeWordFromWord32(TNode<Word32T> word32) {
return DecodeWord<BitField>(ChangeUint32ToWord(word32));
}
@@ -2540,7 +2593,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
// Decodes an unsigned (!) value from |word32| to an uint32 node.
- TNode<Uint32T> DecodeWord32(SloppyTNode<Word32T> word32, uint32_t shift,
+ TNode<Uint32T> DecodeWord32(TNode<Word32T> word32, uint32_t shift,
uint32_t mask);
// Decodes an unsigned (!) value from |word| to a word-size node.
@@ -2594,24 +2647,24 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Returns true if any of the |T|'s bits in given |word32| are set.
template <typename T>
- TNode<BoolT> IsSetWord32(SloppyTNode<Word32T> word32) {
+ TNode<BoolT> IsSetWord32(TNode<Word32T> word32) {
return IsSetWord32(word32, T::kMask);
}
// Returns true if any of the mask's bits in given |word32| are set.
- TNode<BoolT> IsSetWord32(SloppyTNode<Word32T> word32, uint32_t mask) {
+ TNode<BoolT> IsSetWord32(TNode<Word32T> word32, uint32_t mask) {
return Word32NotEqual(Word32And(word32, Int32Constant(mask)),
Int32Constant(0));
}
// Returns true if none of the mask's bits in given |word32| are set.
- TNode<BoolT> IsNotSetWord32(SloppyTNode<Word32T> word32, uint32_t mask) {
+ TNode<BoolT> IsNotSetWord32(TNode<Word32T> word32, uint32_t mask) {
return Word32Equal(Word32And(word32, Int32Constant(mask)),
Int32Constant(0));
}
// Returns true if all of the mask's bits in a given |word32| are set.
- TNode<BoolT> IsAllSetWord32(SloppyTNode<Word32T> word32, uint32_t mask) {
+ TNode<BoolT> IsAllSetWord32(TNode<Word32T> word32, uint32_t mask) {
TNode<Int32T> const_mask = Int32Constant(mask);
return Word32Equal(Word32And(word32, const_mask), const_mask);
}
@@ -2648,12 +2701,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Returns true if all of the |T|'s bits in given |word32| are clear.
template <typename T>
- TNode<BoolT> IsClearWord32(SloppyTNode<Word32T> word32) {
+ TNode<BoolT> IsClearWord32(TNode<Word32T> word32) {
return IsClearWord32(word32, T::kMask);
}
// Returns true if all of the mask's bits in given |word32| are clear.
- TNode<BoolT> IsClearWord32(SloppyTNode<Word32T> word32, uint32_t mask) {
+ TNode<BoolT> IsClearWord32(TNode<Word32T> word32, uint32_t mask) {
return Word32Equal(Word32And(word32, Int32Constant(mask)),
Int32Constant(0));
}
@@ -2706,7 +2759,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// - |if_not_internalized| if the string is not in the string table (but
// does not add it).
// - |if_bailout| for unsupported cases (e.g. uncachable array index).
- void TryInternalizeString(SloppyTNode<String> string, Label* if_index,
+ void TryInternalizeString(TNode<String> string, Label* if_index,
TVariable<IntPtrT>* var_index,
Label* if_internalized,
TVariable<Name>* var_internalized,
@@ -2880,12 +2933,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// is an accessor then it also calls a getter. If the property is a double
// field it re-wraps value in an immutable heap number. {unique_name} must be
// a unique name (Symbol or InternalizedString) that is not an array index.
- void TryGetOwnProperty(TNode<Context> context, TNode<HeapObject> receiver,
+ void TryGetOwnProperty(TNode<Context> context, TNode<Object> receiver,
TNode<JSReceiver> object, TNode<Map> map,
TNode<Int32T> instance_type, TNode<Name> unique_name,
Label* if_found_value, TVariable<Object>* var_value,
Label* if_not_found, Label* if_bailout);
- void TryGetOwnProperty(TNode<Context> context, TNode<HeapObject> receiver,
+ void TryGetOwnProperty(TNode<Context> context, TNode<Object> receiver,
TNode<JSReceiver> object, TNode<Map> map,
TNode<Int32T> instance_type, TNode<Name> unique_name,
Label* if_found_value, TVariable<Object>* var_value,
@@ -2893,12 +2946,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<Object>* var_raw_value, Label* if_not_found,
Label* if_bailout, GetOwnPropertyMode mode);
- TNode<Object> GetProperty(SloppyTNode<Context> context,
+ TNode<Object> GetProperty(TNode<Context> context,
SloppyTNode<Object> receiver, Handle<Name> name) {
return GetProperty(context, receiver, HeapConstant(name));
}
- TNode<Object> GetProperty(SloppyTNode<Context> context,
+ TNode<Object> GetProperty(TNode<Context> context,
SloppyTNode<Object> receiver,
SloppyTNode<Object> name) {
return CallBuiltin(Builtins::kGetProperty, context, receiver, name);
@@ -2970,9 +3023,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
//
// Note: this code does not check if the global dictionary points to deleted
// entry! This has to be done by the caller.
- void TryLookupProperty(SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ void TryLookupProperty(TNode<HeapObject> object, TNode<Map> map,
SloppyTNode<Int32T> instance_type,
- SloppyTNode<Name> unique_name, Label* if_found_fast,
+ TNode<Name> unique_name, Label* if_found_fast,
Label* if_found_dict, Label* if_found_global,
TVariable<HeapObject>* var_meta_storage,
TVariable<IntPtrT>* var_name_index,
@@ -3049,19 +3102,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<FeedbackVector> LoadFeedbackVectorForStub();
// Load the value from closure's feedback cell.
- TNode<HeapObject> LoadFeedbackCellValue(SloppyTNode<JSFunction> closure);
+ TNode<HeapObject> LoadFeedbackCellValue(TNode<JSFunction> closure);
// Load the object from feedback vector cell for the given closure.
// The returned object could be undefined if the closure does not have
// a feedback vector associated with it.
- TNode<HeapObject> LoadFeedbackVector(SloppyTNode<JSFunction> closure);
+ TNode<HeapObject> LoadFeedbackVector(TNode<JSFunction> closure);
// Load the ClosureFeedbackCellArray that contains the feedback cells
// used when creating closures from this function. This array could be
// directly hanging off the FeedbackCell when there is no feedback vector
// or available from the feedback vector's header.
TNode<ClosureFeedbackCellArray> LoadClosureFeedbackArray(
- SloppyTNode<JSFunction> closure);
+ TNode<JSFunction> closure);
// Update the type feedback vector.
void UpdateFeedback(TNode<Smi> feedback,
@@ -3084,7 +3137,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Check if a property name might require protector invalidation when it is
// used for a property store or deletion.
- void CheckForAssociatedProtector(SloppyTNode<Name> name, Label* if_protector);
+ void CheckForAssociatedProtector(TNode<Name> name, Label* if_protector);
TNode<Map> LoadReceiverMap(SloppyTNode<Object> receiver);
@@ -3109,8 +3162,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// we pass {value} as BigInt object instead of int64_t. We should
// teach TurboFan to handle int64_t on 32-bit platforms eventually.
template <typename TIndex>
- void StoreElement(Node* elements, ElementsKind kind, TNode<TIndex> index,
- Node* value);
+ void StoreElement(TNode<RawPtrT> elements, ElementsKind kind,
+ TNode<TIndex> index, Node* value);
// Implements the BigInt part of
// https://tc39.github.io/proposal-bigint/#sec-numbertorawbytes,
@@ -3219,10 +3272,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Operation op, TNode<Object> left, TNode<Object> right,
TNode<Context> context, TVariable<Smi>* var_type_feedback = nullptr);
- void BranchIfNumberRelationalComparison(Operation op,
- SloppyTNode<Number> left,
- SloppyTNode<Number> right,
- Label* if_true, Label* if_false);
+ void BranchIfNumberRelationalComparison(Operation op, TNode<Number> left,
+ TNode<Number> right, Label* if_true,
+ Label* if_false);
void BranchIfNumberEqual(TNode<Number> left, TNode<Number> right,
Label* if_true, Label* if_false) {
@@ -3265,12 +3317,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Branch(IsAccessorPair(CAST(value)), if_accessor_pair, if_not_accessor_pair);
}
- void GotoIfNumberGreaterThanOrEqual(SloppyTNode<Number> left,
- SloppyTNode<Number> right,
+ void GotoIfNumberGreaterThanOrEqual(TNode<Number> left, TNode<Number> right,
Label* if_false);
TNode<Oddball> Equal(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
- SloppyTNode<Context> context,
+ TNode<Context> context,
TVariable<Smi>* var_type_feedback = nullptr);
TNode<Oddball> StrictEqual(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
@@ -3291,8 +3342,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum HasPropertyLookupMode { kHasProperty, kForInHasProperty };
- TNode<Oddball> HasProperty(SloppyTNode<Context> context,
- SloppyTNode<Object> object,
+ TNode<Oddball> HasProperty(TNode<Context> context, SloppyTNode<Object> object,
SloppyTNode<Object> key,
HasPropertyLookupMode mode);
@@ -3304,14 +3354,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
HasPropertyLookupMode::kHasProperty);
}
+ void ForInPrepare(TNode<HeapObject> enumerator, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector,
+ TNode<FixedArray>* cache_array_out,
+ TNode<Smi>* cache_length_out);
+ // Returns {cache_array} and {cache_length} in a fixed array of length 2.
+ // TODO(jgruber): Tuple2 would be a slightly better fit as the return type,
+ // but FixedArray has better support and there are no effective drawbacks to
+ // using it instead of Tuple2 in practice.
+ TNode<FixedArray> ForInPrepareForTorque(
+ TNode<HeapObject> enumerator, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector);
+
TNode<String> Typeof(SloppyTNode<Object> value);
- TNode<Object> GetSuperConstructor(TNode<Context> context,
- TNode<JSFunction> active_function);
+ TNode<HeapObject> GetSuperConstructor(TNode<JSFunction> active_function);
- TNode<JSReceiver> SpeciesConstructor(
- SloppyTNode<Context> context, SloppyTNode<Object> object,
- SloppyTNode<JSReceiver> default_constructor);
+ TNode<JSReceiver> SpeciesConstructor(TNode<Context> context,
+ SloppyTNode<Object> object,
+ TNode<JSReceiver> default_constructor);
TNode<Oddball> InstanceOf(TNode<Object> object, TNode<Object> callable,
TNode<Context> context);
@@ -3322,7 +3383,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// JSArrayBuffer helpers
TNode<RawPtrT> LoadJSArrayBufferBackingStorePtr(
TNode<JSArrayBuffer> array_buffer);
- void ThrowIfArrayBufferIsDetached(SloppyTNode<Context> context,
+ void ThrowIfArrayBufferIsDetached(TNode<Context> context,
TNode<JSArrayBuffer> array_buffer,
const char* method_name);
@@ -3334,7 +3395,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<UintPtrT> LoadJSArrayBufferViewByteOffset(
TNode<JSArrayBufferView> array_buffer_view);
void ThrowIfArrayBufferViewBufferIsDetached(
- SloppyTNode<Context> context, TNode<JSArrayBufferView> array_buffer_view,
+ TNode<Context> context, TNode<JSArrayBufferView> array_buffer_view,
const char* method_name);
// JSTypedArray helpers
@@ -3358,9 +3419,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Figure out the SFI's code object using its data field.
// If |if_compile_lazy| is provided then the execution will go to the given
// label in case of a CompileLazy code object.
- TNode<Code> GetSharedFunctionInfoCode(
- SloppyTNode<SharedFunctionInfo> shared_info,
- Label* if_compile_lazy = nullptr);
+ TNode<Code> GetSharedFunctionInfoCode(TNode<SharedFunctionInfo> shared_info,
+ Label* if_compile_lazy = nullptr);
TNode<JSFunction> AllocateFunctionWithMapAndContext(
TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
@@ -3387,8 +3447,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Support for printf-style debugging
void Print(const char* s);
- void Print(const char* prefix, SloppyTNode<MaybeObject> tagged_value);
- void Print(SloppyTNode<MaybeObject> tagged_value) {
+ void Print(const char* prefix, TNode<MaybeObject> tagged_value);
+ void Print(TNode<MaybeObject> tagged_value) {
return Print(nullptr, tagged_value);
}
@@ -3410,6 +3470,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
bool ConstexprInt31Equal(int31_t a, int31_t b) { return a == b; }
bool ConstexprInt31NotEqual(int31_t a, int31_t b) { return a != b; }
bool ConstexprInt31GreaterThanEqual(int31_t a, int31_t b) { return a >= b; }
+ bool ConstexprUint32Equal(uint32_t a, uint32_t b) { return a == b; }
+ bool ConstexprUint32NotEqual(uint32_t a, uint32_t b) { return a != b; }
bool ConstexprInt32Equal(int32_t a, int32_t b) { return a == b; }
bool ConstexprInt32NotEqual(int32_t a, int32_t b) { return a != b; }
bool ConstexprInt32GreaterThanEqual(int32_t a, int32_t b) { return a >= b; }
@@ -3449,18 +3511,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Number> length);
// Implements DescriptorArray::Search().
- void DescriptorLookup(SloppyTNode<Name> unique_name,
- SloppyTNode<DescriptorArray> descriptors,
- SloppyTNode<Uint32T> bitfield3, Label* if_found,
+ void DescriptorLookup(TNode<Name> unique_name,
+ TNode<DescriptorArray> descriptors,
+ TNode<Uint32T> bitfield3, Label* if_found,
TVariable<IntPtrT>* var_name_index,
Label* if_not_found);
// Implements TransitionArray::SearchName() - searches for first transition
// entry with given name (note that there could be multiple entries with
// the same name).
- void TransitionLookup(SloppyTNode<Name> unique_name,
- SloppyTNode<TransitionArray> transitions,
- Label* if_found, TVariable<IntPtrT>* var_name_index,
+ void TransitionLookup(TNode<Name> unique_name,
+ TNode<TransitionArray> transitions, Label* if_found,
+ TVariable<IntPtrT>* var_name_index,
Label* if_not_found);
// Implements generic search procedure like i::Search<Array>().
@@ -3569,6 +3631,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<JSFinalizationRegistry> finalization_registry,
TNode<WeakCell> weak_cell);
+ TNode<IntPtrT> FeedbackIteratorSizeFor(int number_of_entries) {
+ return IntPtrConstant(FeedbackIterator::SizeFor(number_of_entries));
+ }
+
+ TNode<IntPtrT> FeedbackIteratorMapIndexForEntry(int entry) {
+ return IntPtrConstant(FeedbackIterator::MapIndexForEntry(entry));
+ }
+
+ TNode<IntPtrT> FeedbackIteratorHandlerIndexForEntry(int entry) {
+ return IntPtrConstant(FeedbackIterator::HandlerIndexForEntry(entry));
+ }
+
private:
friend class CodeStubArguments;
@@ -3658,8 +3732,26 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> value, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
int additional_offset = 0);
- // Converts {input} to a number. {input} must be a plain primitve.
- TNode<Number> PlainPrimitiveNonNumberToNumber(TNode<HeapObject> input);
+ // Store value to an elements array with given elements kind.
+ // TODO(turbofan): For BIGINT64_ELEMENTS and BIGUINT64_ELEMENTS
+ // we pass {value} as BigInt object instead of int64_t. We should
+ // teach TurboFan to handle int64_t on 32-bit platforms eventually.
+ // TODO(solanes): This method can go away and simplify into only one version
+ // of StoreElement once we have "if constexpr" available to use.
+ template <typename TArray, typename TIndex>
+ void StoreElementBigIntOrTypedArray(TNode<TArray> elements, ElementsKind kind,
+ TNode<TIndex> index, Node* value);
+
+ template <typename TIndex>
+ void StoreElement(TNode<FixedArrayBase> elements, ElementsKind kind,
+ TNode<TIndex> index, Node* value);
+
+ // Converts {input} to a number if {input} is a plain primitive (i.e. String or
+ // Oddball) and stores the result in {var_result}. Otherwise, it bails out to
+ // {if_bailout}.
+ void TryPlainPrimitiveNonNumberToNumber(TNode<HeapObject> input,
+ TVariable<Number>* var_result,
+ Label* if_bailout);
};
class V8_EXPORT_PRIVATE CodeStubArguments {
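The bulk of the code-stub-assembler.h hunks above replace SloppyTNode<T> parameters with TNode<T>. Roughly, SloppyTNode<T> accepts an untyped compiler::Node* implicitly, while TNode<T> forces the caller to supply an already-typed node (or cast explicitly), so the signature changes tighten type checking without changing behavior. Below is a minimal, self-contained C++ sketch of that distinction; the types are simplified stand-ins for illustration, not the real definitions from src/codegen/code-assembler.h.

#include <cassert>

struct Node {};  // stand-in for an untyped compiler::Node

template <class T>
class TNode {
 public:
  static TNode UncheckedCast(Node* node) { return TNode(node); }
  Node* raw() const { return node_; }

 protected:
  explicit TNode(Node* node) : node_(node) {}

 private:
  Node* node_;
};

// "Sloppy" variant: converts implicitly from an untyped Node*, so call sites
// compile without an explicit cast (and without any type information).
template <class T>
class SloppyTNode : public TNode<T> {
 public:
  SloppyTNode(Node* node) : TNode<T>(TNode<T>::UncheckedCast(node)) {}
  SloppyTNode(const TNode<T>& other) : TNode<T>(other) {}
};

struct Map {};  // stand-in for the Map object type

// Old-style signature: an untyped Node* is accepted implicitly.
void LoadMapPrototypeOld(SloppyTNode<Map> map) { assert(map.raw() != nullptr); }

// New-style signature: the caller must already hold a TNode<Map>.
void LoadMapPrototypeNew(TNode<Map> map) { assert(map.raw() != nullptr); }

int main() {
  Node raw;
  LoadMapPrototypeOld(&raw);  // implicit, unchecked conversion
  // LoadMapPrototypeNew(&raw);  // would no longer compile
  LoadMapPrototypeNew(TNode<Map>::UncheckedCast(&raw));  // cast is explicit
  return 0;
}

The practical effect is visible throughout the hunk: call sites that previously leaned on the implicit conversion must now pass values that are already typed, which is why so many declarations change while the generated code stays the same.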
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index 9c5cb42edd..f5e9bb8988 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -8,7 +8,7 @@
#include "src/heap/factory.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
-#include "src/objects/compilation-cache-inl.h"
+#include "src/objects/compilation-cache-table-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
#include "src/objects/visitors.h"
@@ -72,12 +72,16 @@ void CompilationSubCache::AgeCustom(CompilationSubCache* c) {
CompilationCacheTable::cast(c->tables_[0]).Age();
}
-void CompilationCacheScript::Age() { AgeCustom(this); }
+void CompilationCacheScript::Age() {
+ if (FLAG_isolate_script_cache_ageing) AgeCustom(this);
+}
void CompilationCacheEval::Age() { AgeCustom(this); }
void CompilationCacheRegExp::Age() { AgeByGeneration(this); }
void CompilationCacheCode::Age() {
- if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceAgeing();
- AgeByGeneration(this);
+ if (FLAG_turbo_nci_cache_ageing) {
+ if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceAgeing();
+ AgeByGeneration(this);
+ }
}
void CompilationSubCache::Iterate(RootVisitor* v) {
diff --git a/deps/v8/src/codegen/compilation-cache.h b/deps/v8/src/codegen/compilation-cache.h
index 8aac29fc29..56d90a37da 100644
--- a/deps/v8/src/codegen/compilation-cache.h
+++ b/deps/v8/src/codegen/compilation-cache.h
@@ -6,7 +6,7 @@
#define V8_CODEGEN_COMPILATION_CACHE_H_
#include "src/base/hashmap.h"
-#include "src/objects/compilation-cache.h"
+#include "src/objects/compilation-cache-table.h"
#include "src/utils/allocation.h"
namespace v8 {
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index 33b6bbb945..bb51b3be1e 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -36,6 +36,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/local-factory-inl.h"
#include "src/heap/local-heap-inl.h"
+#include "src/heap/local-heap.h"
#include "src/init/bootstrapper.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/log-inl.h"
@@ -93,16 +94,15 @@ class CompilerTracer : public AllStatic {
static void PrintTracePrefix(const CodeTracer::Scope& scope,
const char* header,
OptimizedCompilationInfo* info) {
- PrintF(scope.file(), "[%s ", header);
- info->closure()->ShortPrint(scope.file());
- PrintF(scope.file(), " (target %s)", CodeKindToString(info->code_kind()));
+ PrintTracePrefix(scope, header, info->closure(), info->code_kind());
}
static void PrintTracePrefix(const CodeTracer::Scope& scope,
- const char* header,
- Handle<JSFunction> function) {
+ const char* header, Handle<JSFunction> function,
+ CodeKind code_kind) {
PrintF(scope.file(), "[%s ", header);
function->ShortPrint(scope.file());
+ PrintF(scope.file(), " (target %s)", CodeKindToString(code_kind));
}
static void PrintTraceSuffix(const CodeTracer::Scope& scope) {
@@ -151,10 +151,11 @@ class CompilerTracer : public AllStatic {
static void TraceOptimizedCodeCacheHit(Isolate* isolate,
Handle<JSFunction> function,
- BailoutId osr_offset) {
+ BailoutId osr_offset,
+ CodeKind code_kind) {
if (!FLAG_trace_opt) return;
CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintTracePrefix(scope, "found optimized code for", function);
+ PrintTracePrefix(scope, "found optimized code for", function, code_kind);
if (!osr_offset.IsNone()) {
PrintF(scope.file(), " at OSR AST id %d", osr_offset.ToInt());
}
@@ -162,13 +163,24 @@ class CompilerTracer : public AllStatic {
}
static void TraceOptimizeForAlwaysOpt(Isolate* isolate,
- Handle<JSFunction> function) {
+ Handle<JSFunction> function,
+ CodeKind code_kind) {
if (!FLAG_trace_opt) return;
CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintTracePrefix(scope, "optimizing", function);
+ PrintTracePrefix(scope, "optimizing", function, code_kind);
PrintF(scope.file(), " because --always-opt");
PrintTraceSuffix(scope);
}
+
+ static void TraceMarkForAlwaysOpt(Isolate* isolate,
+ Handle<JSFunction> function) {
+ if (!FLAG_trace_opt) return;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[marking ");
+ function->ShortPrint(scope.file());
+ PrintF(scope.file(), " for optimized recompilation because --always-opt");
+ PrintF(scope.file(), "]\n");
+ }
};
} // namespace
@@ -340,12 +352,13 @@ CompilationJob::Status OptimizedCompilationJob::PrepareJob(Isolate* isolate) {
}
CompilationJob::Status OptimizedCompilationJob::ExecuteJob(
- RuntimeCallStats* stats) {
+ RuntimeCallStats* stats, LocalIsolate* local_isolate) {
DisallowHeapAccess no_heap_access;
// Delegate to the underlying implementation.
DCHECK_EQ(state(), State::kReadyToExecute);
ScopedTimer t(&time_taken_to_execute_);
- return UpdateState(ExecuteJobImpl(stats), State::kReadyToFinalize);
+ return UpdateState(ExecuteJobImpl(stats, local_isolate),
+ State::kReadyToFinalize);
}
CompilationJob::Status OptimizedCompilationJob::FinalizeJob(Isolate* isolate) {
@@ -480,7 +493,7 @@ bool UseAsmWasm(FunctionLiteral* literal, bool asm_wasm_broken) {
void InstallInterpreterTrampolineCopy(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info) {
DCHECK(FLAG_interpreted_frames_native_stack);
- if (!shared_info->function_data().IsBytecodeArray()) {
+ if (!shared_info->function_data(kAcquireLoad).IsBytecodeArray()) {
DCHECK(!shared_info->HasBytecodeArray());
return;
}
@@ -515,19 +528,6 @@ void InstallInterpreterTrampolineCopy(Isolate* isolate,
script_name, line_num, column_num));
}
-void InstallCoverageInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared,
- Handle<CoverageInfo> coverage_info) {
- DCHECK(isolate->is_block_code_coverage());
- isolate->debug()->InstallCoverageInfo(shared, coverage_info);
-}
-
-void InstallCoverageInfo(LocalIsolate* isolate,
- Handle<SharedFunctionInfo> shared,
- Handle<CoverageInfo> coverage_info) {
- // We should only have coverage info when finalizing on the main thread.
- UNREACHABLE();
-}
-
template <typename LocalIsolate>
void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
Handle<SharedFunctionInfo> shared_info,
@@ -556,12 +556,6 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
shared_info->set_feedback_metadata(
ReadOnlyRoots(isolate).empty_feedback_metadata());
}
-
- if (compilation_info->has_coverage_info() &&
- !shared_info->HasCoverageInfo()) {
- InstallCoverageInfo(isolate, shared_info,
- compilation_info->coverage_info());
- }
}
void LogUnoptimizedCompilation(Isolate* isolate,
@@ -612,12 +606,10 @@ void UpdateSharedFunctionFlagsAfterCompilation(FunctionLiteral* literal,
shared_info.set_class_scope_has_private_brand(
literal->class_scope_has_private_brand());
- shared_info.set_is_safe_to_skip_arguments_adaptor(
- literal->SafeToSkipArgumentsAdaptor());
shared_info.set_has_static_private_methods_or_accessors(
literal->has_static_private_methods_or_accessors());
- shared_info.set_scope_info(*literal->scope()->scope_info());
+ shared_info.SetScopeInfo(*literal->scope()->scope_info());
}
// Finalize a single compilation job. This function can return
@@ -634,12 +626,20 @@ CompilationJob::Status FinalizeSingleUnoptimizedCompilationJob(
CompilationJob::Status status = job->FinalizeJob(shared_info, isolate);
if (status == CompilationJob::SUCCEEDED) {
InstallUnoptimizedCode(compilation_info, shared_info, isolate);
+
+ MaybeHandle<CoverageInfo> coverage_info;
+ if (compilation_info->has_coverage_info() &&
+ !shared_info->HasCoverageInfo()) {
+ coverage_info = compilation_info->coverage_info();
+ }
+
finalize_unoptimized_compilation_data_list->emplace_back(
- isolate, shared_info, job->time_taken_to_execute(),
+ isolate, shared_info, coverage_info, job->time_taken_to_execute(),
job->time_taken_to_finalize());
}
- DCHECK_IMPLIES(status == CompilationJob::RETRY_ON_MAIN_THREAD,
- (std::is_same<LocalIsolate, LocalIsolate>::value));
+ DCHECK_IMPLIES(
+ status == CompilationJob::RETRY_ON_MAIN_THREAD,
+ (std::is_same<LocalIsolate, v8::internal::LocalIsolate>::value));
return status;
}
@@ -833,7 +833,7 @@ bool FinalizeDeferredUnoptimizedCompilationJobs(
}
V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
- Handle<JSFunction> function, BailoutId osr_offset) {
+ Handle<JSFunction> function, BailoutId osr_offset, CodeKind code_kind) {
RuntimeCallTimerScope runtimeTimer(
function->GetIsolate(),
RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
@@ -852,13 +852,13 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
.GetOSROptimizedCodeCache()
.GetOptimizedCode(shared, osr_offset, isolate);
}
- if (!code.is_null()) {
+ DCHECK_IMPLIES(!code.is_null(), code.kind() <= code_kind);
+ if (!code.is_null() && code.kind() == code_kind) {
// Caching of optimized code enabled and optimized code found.
DCHECK(!code.marked_for_deoptimization());
DCHECK(function->shared().is_compiled());
DCHECK(CodeKindIsStoredInOptimizedCodeCache(code.kind()));
- DCHECK_IMPLIES(!osr_offset.IsNone(),
- code.kind() == CodeKind::OPTIMIZED_FUNCTION);
+ DCHECK_IMPLIES(!osr_offset.IsNone(), CodeKindCanOSR(code.kind()));
return Handle<Code>(code, isolate);
}
return MaybeHandle<Code>();
@@ -902,7 +902,7 @@ void InsertCodeIntoOptimizedCodeCache(
handle(function->feedback_vector(), function->GetIsolate());
FeedbackVector::SetOptimizedCode(vector, code);
} else {
- DCHECK_EQ(kind, CodeKind::OPTIMIZED_FUNCTION);
+ DCHECK(CodeKindCanOSR(kind));
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
compilation_info->osr_offset());
}
@@ -952,10 +952,21 @@ bool GetOptimizedCodeNow(OptimizedCompilationJob* job, Isolate* isolate,
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OptimizeNonConcurrent");
- if (!PrepareJobWithHandleScope(job, isolate, compilation_info) ||
- job->ExecuteJob(isolate->counters()->runtime_call_stats()) !=
- CompilationJob::SUCCEEDED ||
- job->FinalizeJob(isolate) != CompilationJob::SUCCEEDED) {
+ if (!PrepareJobWithHandleScope(job, isolate, compilation_info)) {
+ CompilerTracer::TraceAbortedJob(isolate, compilation_info);
+ return false;
+ }
+
+ {
+ LocalIsolate local_isolate(isolate, ThreadKind::kMain);
+ if (job->ExecuteJob(isolate->counters()->runtime_call_stats(),
+ &local_isolate)) {
+ CompilerTracer::TraceAbortedJob(isolate, compilation_info);
+ return false;
+ }
+ }
+
+ if (job->FinalizeJob(isolate) != CompilationJob::SUCCEEDED) {
CompilerTracer::TraceAbortedJob(isolate, compilation_info);
return false;
}
@@ -996,8 +1007,9 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OptimizeConcurrentPrepare");
- if (!PrepareJobWithHandleScope(job.get(), isolate, compilation_info))
+ if (!PrepareJobWithHandleScope(job.get(), isolate, compilation_info)) {
return false;
+ }
// The background recompile will own this job.
isolate->optimizing_compile_dispatcher()->QueueForOptimization(job.get());
@@ -1012,7 +1024,12 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
if (CodeKindIsStoredInOptimizedCodeCache(code_kind)) {
function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
}
- DCHECK(function->ActiveTierIsIgnition() || function->ActiveTierIsNCI());
+
+ // Note: Usually the active tier is expected to be Ignition or NCI at this
+ // point (in other words we don't expect to optimize if the function is
+ // already TF-optimized). There is a special case for OSR though, for which
+ // we *can* reach this point even if we've already generated non-OSR'd TF
+ // code.
DCHECK(function->shared().HasBytecodeArray());
return true;
}
@@ -1028,6 +1045,18 @@ Handle<Code> ContinuationForConcurrentOptimization(
// Tiering up to Turbofan and cached optimized code exists. Continue
// execution there until TF optimization has finished.
return cached_code;
+ } else if (FLAG_turboprop_as_midtier &&
+ function->HasAvailableOptimizedCode()) {
+ DCHECK(function->NextTier() == CodeKind::TURBOFAN);
+ // It is possible that we have marked a closure for TurboFan optimization
+ // but the marker is processed by another closure that doesn't have
+ // optimized code yet. So heal the closure here and return the optimized
+ // code.
+ if (!function->HasAttachedOptimizedCode()) {
+ DCHECK(function->feedback_vector().has_optimized_code());
+ function->set_code(function->feedback_vector().optimized_code());
+ }
+ return handle(function->code(), isolate);
}
return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
}
@@ -1074,9 +1103,10 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// Check the optimized code cache (stored on the SharedFunctionInfo).
if (CodeKindIsStoredInOptimizedCodeCache(code_kind)) {
Handle<Code> cached_code;
- if (GetCodeFromOptimizedCodeCache(function, osr_offset)
+ if (GetCodeFromOptimizedCodeCache(function, osr_offset, code_kind)
.ToHandle(&cached_code)) {
- CompilerTracer::TraceOptimizedCodeCacheHit(isolate, function, osr_offset);
+ CompilerTracer::TraceOptimizedCodeCacheHit(isolate, function, osr_offset,
+ code_kind);
return cached_code;
}
}
@@ -1089,7 +1119,8 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// contexts).
if (CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
DCHECK(osr_offset.IsNone());
- DCHECK(FLAG_turbo_nci_as_midtier || shared->has_optimized_at_least_once());
+ DCHECK(FLAG_turbo_nci_as_midtier || !FLAG_turbo_nci_delayed_codegen ||
+ shared->has_optimized_at_least_once());
Handle<Code> cached_code;
if (GetCodeFromCompilationCache(isolate, shared).ToHandle(&cached_code)) {
@@ -1203,6 +1234,10 @@ void FinalizeUnoptimizedCompilation(
if (FLAG_interpreted_frames_native_stack) {
InstallInterpreterTrampolineCopy(isolate, shared_info);
}
+ Handle<CoverageInfo> coverage_info;
+ if (finalize_data.coverage_info().ToHandle(&coverage_info)) {
+ isolate->debug()->InstallCoverageInfo(shared_info, coverage_info);
+ }
LogUnoptimizedCompilation(isolate, shared_info, flags,
finalize_data.time_taken_to_execute(),
@@ -1395,11 +1430,14 @@ CompilationHandleScope::~CompilationHandleScope() {
FinalizeUnoptimizedCompilationData::FinalizeUnoptimizedCompilationData(
LocalIsolate* isolate, Handle<SharedFunctionInfo> function_handle,
+ MaybeHandle<CoverageInfo> coverage_info,
base::TimeDelta time_taken_to_execute,
base::TimeDelta time_taken_to_finalize)
: time_taken_to_execute_(time_taken_to_execute),
time_taken_to_finalize_(time_taken_to_finalize),
- function_handle_(isolate->heap()->NewPersistentHandle(function_handle)) {}
+ function_handle_(isolate->heap()->NewPersistentHandle(function_handle)),
+ coverage_info_(isolate->heap()->NewPersistentMaybeHandle(coverage_info)) {
+}
DeferredFinalizationJobData::DeferredFinalizationJobData(
LocalIsolate* isolate, Handle<SharedFunctionInfo> function_handle,
@@ -1414,7 +1452,7 @@ BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
REPLMode::kNo)),
compile_state_(isolate),
info_(std::make_unique<ParseInfo>(isolate, flags_, &compile_state_)),
- isolate_for_local_isolate_(nullptr),
+ isolate_for_local_isolate_(isolate),
start_position_(0),
end_position_(0),
function_literal_id_(kFunctionLiteralIdTopLevel),
@@ -1434,13 +1472,6 @@ BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
streamed_data->source_stream.get(), streamed_data->encoding));
info_->set_character_stream(std::move(stream));
-
- // TODO(leszeks): Add block coverage support to off-thread finalization.
- finalize_on_background_thread_ =
- FLAG_finalize_streaming_on_background && !flags_.block_coverage_enabled();
- if (finalize_on_background_thread()) {
- isolate_for_local_isolate_ = isolate;
- }
}
BackgroundCompileTask::BackgroundCompileTask(
@@ -1460,8 +1491,7 @@ BackgroundCompileTask::BackgroundCompileTask(
stack_size_(max_stack_size),
worker_thread_runtime_call_stats_(worker_thread_runtime_stats),
timer_(timer),
- language_mode_(info_->language_mode()),
- finalize_on_background_thread_(false) {
+ language_mode_(info_->language_mode()) {
DCHECK_EQ(outer_parse_info->parameters_end_pos(), kNoSourcePosition);
DCHECK_NULL(outer_parse_info->extension());
@@ -1545,7 +1575,7 @@ void BackgroundCompileTask::Run() {
// Save the language mode.
language_mode_ = info_->language_mode();
- if (!finalize_on_background_thread_) {
+ if (!FLAG_finalize_streaming_on_background) {
if (info_->literal() != nullptr) {
CompileOnBackgroundThread(info_.get(), compile_state_.allocator(),
&compilation_jobs_);
@@ -1553,7 +1583,8 @@ void BackgroundCompileTask::Run() {
} else {
DCHECK(info_->flags().is_toplevel());
- LocalIsolate isolate(isolate_for_local_isolate_);
+ LocalIsolate isolate(isolate_for_local_isolate_, ThreadKind::kBackground);
+ UnparkedScope unparked_scope(isolate.heap());
LocalHandleScope handle_scope(&isolate);
info_->ast_value_factory()->Internalize(&isolate);
@@ -1701,8 +1732,8 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
shared_info->GetDebugInfo().HasInstrumentedBytecodeArray()) {
ByteArray source_position_table =
job->compilation_info()->bytecode_array()->SourcePositionTable();
- shared_info->GetDebugBytecodeArray().set_synchronized_source_position_table(
- source_position_table);
+ shared_info->GetDebugBytecodeArray().set_source_position_table(
+ source_position_table, kReleaseStore);
}
DCHECK(!isolate->has_pending_exception());
@@ -1815,7 +1846,8 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
// Optimize now if --always-opt is enabled.
if (FLAG_always_opt && !function->shared().HasAsmWasmData()) {
- CompilerTracer::TraceOptimizeForAlwaysOpt(isolate, function);
+ CompilerTracer::TraceOptimizeForAlwaysOpt(isolate, function,
+ CodeKindForTopTier());
Handle<Code> maybe_code;
if (GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent,
@@ -1839,7 +1871,7 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
bool Compiler::FinalizeBackgroundCompileTask(
BackgroundCompileTask* task, Handle<SharedFunctionInfo> shared_info,
Isolate* isolate, ClearExceptionFlag flag) {
- DCHECK(!task->finalize_on_background_thread());
+ DCHECK(!FLAG_finalize_streaming_on_background);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.FinalizeBackgroundCompileTask");
@@ -2067,21 +2099,27 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate, Handle<Context> context,
}
// Check whether embedder allows code generation in this context.
-// (via v8::Isolate::SetModifyCodeGenerationFromStringsCallback)
+// (via v8::Isolate::SetModifyCodeGenerationFromStringsCallback
+// or v8::Isolate::SetModifyCodeGenerationFromStringsCallback2)
bool ModifyCodeGenerationFromStrings(Isolate* isolate, Handle<Context> context,
- Handle<i::Object>* source) {
- DCHECK(isolate->modify_code_gen_callback());
+ Handle<i::Object>* source,
+ bool is_code_like) {
+ DCHECK(isolate->modify_code_gen_callback() ||
+ isolate->modify_code_gen_callback2());
DCHECK(source);
// Callback set. Run it, and use the return value as source, or block
// execution if it's not set.
VMState<EXTERNAL> state(isolate);
- ModifyCodeGenerationFromStringsCallback modify_callback =
- isolate->modify_code_gen_callback();
RuntimeCallTimerScope timer(
isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
ModifyCodeGenerationFromStringsResult result =
- modify_callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(*source));
+ isolate->modify_code_gen_callback()
+ ? isolate->modify_code_gen_callback()(v8::Utils::ToLocal(context),
+ v8::Utils::ToLocal(*source))
+ : isolate->modify_code_gen_callback2()(v8::Utils::ToLocal(context),
+ v8::Utils::ToLocal(*source),
+ is_code_like);
if (result.codegen_allowed && !result.modified_source.IsEmpty()) {
// Use the new source (which might be the same as the old source).
*source =
@@ -2107,7 +2145,7 @@ bool ModifyCodeGenerationFromStrings(Isolate* isolate, Handle<Context> context,
// static
std::pair<MaybeHandle<String>, bool> Compiler::ValidateDynamicCompilationSource(
Isolate* isolate, Handle<Context> context,
- Handle<i::Object> original_source) {
+ Handle<i::Object> original_source, bool is_code_like) {
// Check if the context unconditionally allows code gen from strings.
// allow_code_gen_from_strings can be many things, so we'll always check
// against the 'false' literal, so that e.g. undefined and 'true' are treated
@@ -2121,6 +2159,11 @@ std::pair<MaybeHandle<String>, bool> Compiler::ValidateDynamicCompilationSource(
// allow_code_gen_callback only allows proper strings.
// (I.e., let allow_code_gen_callback decide, if it has been set.)
if (isolate->allow_code_gen_callback()) {
+ // If we run into this condition, the embedder has marked some object
+ // templates as "code like", but has given us a callback that only accepts
+ // strings. That makes no sense.
+ DCHECK(!original_source->IsCodeLike(isolate));
+
if (!original_source->IsString()) {
return {MaybeHandle<String>(), true};
}
@@ -2134,9 +2177,11 @@ std::pair<MaybeHandle<String>, bool> Compiler::ValidateDynamicCompilationSource(
// Check if the context wants to block or modify this source object.
// Double-check that we really have a string now.
// (Let modify_code_gen_callback decide, if it's been set.)
- if (isolate->modify_code_gen_callback()) {
+ if (isolate->modify_code_gen_callback() ||
+ isolate->modify_code_gen_callback2()) {
Handle<i::Object> modified_source = original_source;
- if (!ModifyCodeGenerationFromStrings(isolate, context, &modified_source)) {
+ if (!ModifyCodeGenerationFromStrings(isolate, context, &modified_source,
+ is_code_like)) {
return {MaybeHandle<String>(), false};
}
if (!modified_source->IsString()) {
@@ -2145,6 +2190,15 @@ std::pair<MaybeHandle<String>, bool> Compiler::ValidateDynamicCompilationSource(
return {Handle<String>::cast(modified_source), false};
}
+ if (!context->allow_code_gen_from_strings().IsFalse(isolate) &&
+ original_source->IsCodeLike(isolate)) {
+ // Codegen is unconditionally allowed, and we've been given a CodeLike
+ // object. Stringify.
+ MaybeHandle<String> stringified_source =
+ Object::ToString(isolate, original_source);
+ return {stringified_source, stringified_source.is_null()};
+ }
+
// If unconditional codegen was disabled, and no callback defined, we block
// strings and allow all other objects.
return {MaybeHandle<String>(), !original_source->IsString()};
@@ -2181,12 +2235,13 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromValidatedString(
// static
MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
Handle<Context> context, Handle<Object> source,
- ParseRestriction restriction, int parameters_end_pos) {
+ ParseRestriction restriction, int parameters_end_pos, bool is_code_like) {
Isolate* const isolate = context->GetIsolate();
- Handle<Context> native_context(context->native_context(), isolate);
- return GetFunctionFromValidatedString(
- context, ValidateDynamicCompilationSource(isolate, context, source).first,
- restriction, parameters_end_pos);
+ MaybeHandle<String> validated_source =
+ ValidateDynamicCompilationSource(isolate, context, source, is_code_like)
+ .first;
+ return GetFunctionFromValidatedString(context, validated_source, restriction,
+ parameters_end_pos);
}
namespace {
@@ -2820,7 +2875,7 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
// the isolate cache.
Handle<Script> script;
- if (task->finalize_on_background_thread()) {
+ if (FLAG_finalize_streaming_on_background) {
RuntimeCallTimerScope runtimeTimerScope(
isolate, RuntimeCallCounterId::kCompilePublishBackgroundFinalization);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
@@ -3055,6 +3110,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
if (FLAG_always_opt && shared->allows_lazy_compilation() &&
!shared->optimization_disabled() &&
!function->HasAvailableOptimizedCode()) {
+ CompilerTracer::TraceMarkForAlwaysOpt(isolate, function);
JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
}
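The ModifyCodeGenerationFromStrings changes in the compiler.cc hunk above thread a new is_code_like argument through to the embedder: when the second-generation callback is installed (the updated comment names v8::Isolate::SetModifyCodeGenerationFromStringsCallback2), it is invoked with that extra flag, and code-like objects are stringified via Object::ToString when the context unconditionally allows codegen. A rough sketch of what such an embedder callback could look like follows; the policy it implements is an illustrative assumption, while the result fields (codegen_allowed, modified_source) match those read back by ModifyCodeGenerationFromStrings above.

#include <v8.h>

// Illustrative embedder policy: allow dynamic code for strings and for
// objects the embedder marked as code-like, block everything else.
// Registered via v8::Isolate::SetModifyCodeGenerationFromStringsCallback2
// (entry point named in the comment in the hunk above).
v8::ModifyCodeGenerationFromStringsResult AllowStringsAndCodeLike(
    v8::Local<v8::Context> context, v8::Local<v8::Value> source,
    bool is_code_like) {
  v8::ModifyCodeGenerationFromStringsResult result;
  if (!source->IsString() && !is_code_like) {
    // Neither a string nor a code-like object: block code generation.
    result.codegen_allowed = false;
    return result;
  }
  // Allow codegen and keep the original source. Leaving modified_source
  // empty keeps the original value, since the hunk above only swaps in
  // modified_source when it is non-empty.
  result.codegen_allowed = true;
  return result;
}

Separately, the new IsCodeLike branch in ValidateDynamicCompilationSource covers embedders that rely purely on the allow_code_gen_from_strings context flag: code-like objects are stringified there without any callback installed.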
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index 1e3ed00f93..c599841a01 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -16,6 +16,7 @@
#include "src/handles/persistent-handles.h"
#include "src/logging/code-events.h"
#include "src/objects/contexts.h"
+#include "src/objects/debug-objects.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/pending-compilation-error-handler.h"
#include "src/utils/allocation.h"
@@ -141,13 +142,14 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Create a (bound) function for a String source within a context for eval.
V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction> GetFunctionFromString(
Handle<Context> context, Handle<i::Object> source,
- ParseRestriction restriction, int parameters_end_pos);
+ ParseRestriction restriction, int parameters_end_pos, bool is_code_like);
// Decompose GetFunctionFromString into two functions, to allow callers to
// deal separately with the case of an object not handled by the embedder.
V8_WARN_UNUSED_RESULT static std::pair<MaybeHandle<String>, bool>
ValidateDynamicCompilationSource(Isolate* isolate, Handle<Context> context,
- Handle<i::Object> source_object);
+ Handle<i::Object> source_object,
+ bool is_code_like = false);
V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction>
GetFunctionFromValidatedString(Handle<Context> context,
MaybeHandle<String> source,
@@ -332,7 +334,8 @@ class OptimizedCompilationJob : public CompilationJob {
// Executes the compile job. Can be called on a background thread if
// can_execute_on_background_thread() returns true.
- V8_WARN_UNUSED_RESULT Status ExecuteJob(RuntimeCallStats* stats);
+ V8_WARN_UNUSED_RESULT Status
+ ExecuteJob(RuntimeCallStats* stats, LocalIsolate* local_isolate = nullptr);
// Finalizes the compile job. Must be called on the main thread.
V8_WARN_UNUSED_RESULT Status FinalizeJob(Isolate* isolate);
@@ -357,7 +360,8 @@ class OptimizedCompilationJob : public CompilationJob {
protected:
// Overridden by the actual implementation.
virtual Status PrepareJobImpl(Isolate* isolate) = 0;
- virtual Status ExecuteJobImpl(RuntimeCallStats* stats) = 0;
+ virtual Status ExecuteJobImpl(RuntimeCallStats* stats,
+ LocalIsolate* local_heap) = 0;
virtual Status FinalizeJobImpl(Isolate* isolate) = 0;
private:
@@ -372,14 +376,17 @@ class FinalizeUnoptimizedCompilationData {
public:
FinalizeUnoptimizedCompilationData(Isolate* isolate,
Handle<SharedFunctionInfo> function_handle,
+ MaybeHandle<CoverageInfo> coverage_info,
base::TimeDelta time_taken_to_execute,
base::TimeDelta time_taken_to_finalize)
: time_taken_to_execute_(time_taken_to_execute),
time_taken_to_finalize_(time_taken_to_finalize),
- function_handle_(function_handle) {}
+ function_handle_(function_handle),
+ coverage_info_(coverage_info) {}
FinalizeUnoptimizedCompilationData(LocalIsolate* isolate,
Handle<SharedFunctionInfo> function_handle,
+ MaybeHandle<CoverageInfo> coverage_info,
base::TimeDelta time_taken_to_execute,
base::TimeDelta time_taken_to_finalize);
@@ -387,6 +394,8 @@ class FinalizeUnoptimizedCompilationData {
return function_handle_;
}
+ MaybeHandle<CoverageInfo> coverage_info() const { return coverage_info_; }
+
base::TimeDelta time_taken_to_execute() const {
return time_taken_to_execute_;
}
@@ -398,6 +407,7 @@ class FinalizeUnoptimizedCompilationData {
base::TimeDelta time_taken_to_execute_;
base::TimeDelta time_taken_to_finalize_;
Handle<SharedFunctionInfo> function_handle_;
+ MaybeHandle<CoverageInfo> coverage_info_;
};
using FinalizeUnoptimizedCompilationDataList =
@@ -474,9 +484,6 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
UnoptimizedCompileFlags flags() const { return flags_; }
UnoptimizedCompileState* compile_state() { return &compile_state_; }
LanguageMode language_mode() { return language_mode_; }
- bool finalize_on_background_thread() {
- return finalize_on_background_thread_;
- }
FinalizeUnoptimizedCompilationDataList*
finalize_unoptimized_compilation_data() {
return &finalize_unoptimized_compilation_data_;
@@ -527,13 +534,6 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
TimedHistogram* timer_;
LanguageMode language_mode_;
- // True if the background compilation should be finalized on the background
- // thread. When this is true, the ParseInfo, Parser and compilation jobs are
- // freed on the background thread, the outer_function_sfi holds the top-level
- // function, and the off_thread_isolate has to be merged into the main-thread
- // Isolate.
- bool finalize_on_background_thread_;
-
DISALLOW_COPY_AND_ASSIGN(BackgroundCompileTask);
};
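
ExecuteJob above gains a LocalIsolate* argument with a nullptr default, so main-thread call sites stay unchanged while background compilation can pass its local isolate straight through to ExecuteJobImpl. A small sketch of that defaulted-parameter plumbing with stand-in types (not the real OptimizedCompilationJob):

struct RuntimeCallStats {};  // stand-in
struct LocalIsolate {};      // stand-in

class Job {
 public:
  enum class Status { kSucceeded, kFailed };

  // The defaulted argument keeps existing callers compiling; background
  // callers pass their LocalIsolate explicitly.
  Status ExecuteJob(RuntimeCallStats* stats,
                    LocalIsolate* local_isolate = nullptr) {
    return ExecuteJobImpl(stats, local_isolate);
  }

 protected:
  virtual ~Job() = default;
  virtual Status ExecuteJobImpl(RuntimeCallStats* stats,
                                LocalIsolate* local_isolate) = 0;
};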
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index ba71702e7c..499e5c5f37 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -121,6 +121,13 @@ ExternalReference ExternalReference::handle_scope_implementer_address(
return ExternalReference(isolate->handle_scope_implementer_address());
}
+#ifdef V8_HEAP_SANDBOX
+ExternalReference ExternalReference::external_pointer_table_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->external_pointer_table_address());
+}
+#endif
+
ExternalReference ExternalReference::interpreter_dispatch_table_address(
Isolate* isolate) {
return ExternalReference(isolate->interpreter()->dispatch_table_address());
@@ -468,6 +475,11 @@ ExternalReference ExternalReference::address_of_double_neg_constant() {
return ExternalReference(reinterpret_cast<Address>(&double_negate_constant));
}
+ExternalReference
+ExternalReference::address_of_enable_experimental_regexp_engine() {
+ return ExternalReference(&FLAG_enable_experimental_regexp_engine);
+}
+
ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) {
return ExternalReference(isolate->is_profiling_address());
}
@@ -941,6 +953,11 @@ FUNCTION_REFERENCE(
js_finalization_registry_remove_cell_from_unregister_token_map,
JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap)
+#ifdef V8_HEAP_SANDBOX
+FUNCTION_REFERENCE(external_pointer_table_grow_table_function,
+ ExternalPointerTable::GrowTable)
+#endif
+
bool operator==(ExternalReference lhs, ExternalReference rhs) {
return lhs.address() == rhs.address();
}
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index e35e12237b..72a3397007 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -84,12 +84,24 @@ class StatsCounter;
V(re_check_stack_guard_state, \
"RegExpMacroAssembler*::CheckStackGuardState()") \
V(re_grow_stack, "NativeRegExpMacroAssembler::GrowStack()") \
- V(re_word_character_map, "NativeRegExpMacroAssembler::word_character_map")
+ V(re_word_character_map, "NativeRegExpMacroAssembler::word_character_map") \
+ EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_HEAP_SANDBOX(V)
+
+#ifdef V8_HEAP_SANDBOX
+#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_HEAP_SANDBOX(V) \
+ V(external_pointer_table_address, \
+ "Isolate::external_pointer_table_address(" \
+ ")")
+#else
+#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_HEAP_SANDBOX(V)
+#endif // V8_HEAP_SANDBOX
#define EXTERNAL_REFERENCE_LIST(V) \
V(abort_with_reason, "abort_with_reason") \
V(address_of_double_abs_constant, "double_absolute_constant") \
V(address_of_double_neg_constant, "double_negate_constant") \
+ V(address_of_enable_experimental_regexp_engine, \
+ "address_of_enable_experimental_regexp_engine") \
V(address_of_float_abs_constant, "float_absolute_constant") \
V(address_of_float_neg_constant, "float_negate_constant") \
V(address_of_min_int, "LDoubleConstant::min_int") \
@@ -233,7 +245,8 @@ class StatsCounter;
V(re_match_for_call_from_js, "IrregexpInterpreter::MatchForCallFromJs") \
V(re_experimental_match_for_call_from_js, \
"ExperimentalRegExp::MatchForCallFromJs") \
- EXTERNAL_REFERENCE_LIST_INTL(V)
+ EXTERNAL_REFERENCE_LIST_INTL(V) \
+ EXTERNAL_REFERENCE_LIST_HEAP_SANDBOX(V)
#ifdef V8_INTL_SUPPORT
#define EXTERNAL_REFERENCE_LIST_INTL(V) \
@@ -243,6 +256,14 @@ class StatsCounter;
#define EXTERNAL_REFERENCE_LIST_INTL(V)
#endif // V8_INTL_SUPPORT
+#ifdef V8_HEAP_SANDBOX
+#define EXTERNAL_REFERENCE_LIST_HEAP_SANDBOX(V) \
+ V(external_pointer_table_grow_table_function, \
+ "ExternalPointerTable::GrowTable")
+#else
+#define EXTERNAL_REFERENCE_LIST_HEAP_SANDBOX(V)
+#endif // V8_HEAP_SANDBOX
+
// An ExternalReference represents a C++ address used in the generated
// code. All references to C++ functions and variables must be encapsulated
// in an ExternalReference instance. This is done in order to track the
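
The list changes above follow the usual X-macro layout: the base EXTERNAL_REFERENCE_LIST splices in a sub-list macro that expands to entries only when the feature (V8_HEAP_SANDBOX, V8_INTL_SUPPORT) is compiled in, and to nothing otherwise. A self-contained illustration of the pattern with made-up names rather than V8's macros:

#include <cstdio>

#define BASE_LIST(V)      \
  V(alpha, "alpha entry") \
  V(beta, "beta entry")   \
  FEATURE_LIST(V)

#ifdef ENABLE_FEATURE
#define FEATURE_LIST(V) V(gamma, "feature-only entry")
#else
#define FEATURE_LIST(V)
#endif

// Each expansion of BASE_LIST picks up the feature entries automatically.
enum class Ref {
#define DECLARE_REF(name, desc) k_##name,
  BASE_LIST(DECLARE_REF)
#undef DECLARE_REF
};

int main() {
#define PRINT_REF(name, desc) std::printf("%s: %s\n", #name, desc);
  BASE_LIST(PRINT_REF)
#undef PRINT_REF
  return 0;
}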
diff --git a/deps/v8/src/codegen/handler-table.cc b/deps/v8/src/codegen/handler-table.cc
index fb49f9fa70..8aec047d13 100644
--- a/deps/v8/src/codegen/handler-table.cc
+++ b/deps/v8/src/codegen/handler-table.cc
@@ -142,7 +142,7 @@ int HandlerTable::LengthForRange(int entries) {
// static
int HandlerTable::EmitReturnTableStart(Assembler* masm) {
- masm->DataAlign(sizeof(int32_t)); // Make sure entries are aligned.
+ masm->DataAlign(Code::kMetadataAlignment);
masm->RecordComment(";;; Exception handler table.");
int table_start = masm->pc_offset();
return table_start;
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 321a59cede..f19c8dd1cd 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -302,6 +302,15 @@ Assembler::Assembler(const AssemblerOptions& options,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyway.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
const int code_comments_size = WriteCodeComments();
// Finalize code (at this point overflow() may be true, but the gap ensures
@@ -510,13 +519,6 @@ void Assembler::pop(Operand dst) {
emit_operand(eax, dst);
}
-void Assembler::enter(const Immediate& size) {
- EnsureSpace ensure_space(this);
- EMIT(0xC8);
- emit_w(size);
- EMIT(0);
-}
-
void Assembler::leave() {
EnsureSpace ensure_space(this);
EMIT(0xC9);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index ab26d36376..333daf6da3 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -474,7 +474,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void pop(Register dst);
void pop(Operand dst);
- void enter(const Immediate& size);
void leave();
// Moves
diff --git a/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
index 0177e36c4b..ee9c3aac1c 100644
--- a/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
@@ -294,54 +294,6 @@ void WasmFloat64ToNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 3);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 9558cf540d..b615c59185 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -33,16 +33,9 @@ namespace internal {
Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
DCHECK_GE(index, 0);
-#ifdef V8_REVERSE_JSARGS
// arg[0] = esp + kPCOnStackSize;
// arg[i] = arg[0] + i * kSystemPointerSize;
return Operand(esp, kPCOnStackSize + index * kSystemPointerSize);
-#else
- // arg[0] = (esp + kPCOnStackSize) + argc * kSystemPointerSize;
- // arg[i] = arg[0] - i * kSystemPointerSize;
- return Operand(esp, argc_, times_system_pointer_size,
- kPCOnStackSize - index * kSystemPointerSize);
-#endif
}
// -------------------------------------------------------------------------
@@ -1119,15 +1112,127 @@ void TurboAssembler::PrepareForTailCall(
mov(esp, new_sp_reg);
}
+void MacroAssembler::CompareStackLimit(Register with, StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ // Address through the root register. No load is needed.
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ cmp(with, Operand(kRootRegister, offset));
+}
+
+void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
+ Label* stack_overflow,
+ bool include_receiver) {
+ DCHECK_NE(num_args, scratch);
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_jslimit(isolate());
+ // Compute the space that is left as a negative number in scratch. If
+ // we already overflowed, this will be a positive number.
+ mov(scratch, ExternalReferenceAsOperand(real_stack_limit, scratch));
+ sub(scratch, esp);
+ // TODO(victorgomes): Remove {include_receiver} and always require one extra
+ // word of stack space.
+ lea(scratch, Operand(scratch, num_args, times_system_pointer_size, 0));
+ if (include_receiver) {
+ add(scratch, Immediate(kSystemPointerSize));
+ }
+ // See if we overflowed, i.e. scratch is positive.
+ cmp(scratch, Immediate(0));
+ // TODO(victorgomes): Save some bytes in the builtins that use stack checks
+ // by jumping to a builtin that throws the exception.
+ j(greater, stack_overflow); // Signed comparison.
+}
+
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeFlag flag) {
- DCHECK_EQ(actual_parameter_count, eax);
-
if (expected_parameter_count != actual_parameter_count) {
+ DCHECK_EQ(actual_parameter_count, eax);
DCHECK_EQ(expected_parameter_count, ecx);
-
Label regular_invoke;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // If the expected parameter count is equal to the adaptor sentinel, no need
+ // to push undefined value as arguments.
+ cmp(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
+ j(equal, &regular_invoke, Label::kFar);
+
+ // If overapplication or if the actual argument count is equal to the
+ // formal parameter count, no need to push extra undefined values.
+ sub(expected_parameter_count, actual_parameter_count);
+ j(less_equal, &regular_invoke, Label::kFar);
+
+ // We need to preserve edx, edi, esi and ebx.
+ movd(xmm0, edx);
+ movd(xmm1, edi);
+ movd(xmm2, esi);
+ movd(xmm3, ebx);
+
+ Label stack_overflow;
+ StackOverflowCheck(expected_parameter_count, edx, &stack_overflow);
+
+ Register scratch = esi;
+
+ // Underapplication. Move the arguments already in the stack, including the
+ // receiver and the return address.
+ {
+ Label copy, check;
+ Register src = edx, dest = esp, num = edi, current = ebx;
+ mov(src, esp);
+ lea(scratch,
+ Operand(expected_parameter_count, times_system_pointer_size, 0));
+ AllocateStackSpace(scratch);
+ // Extra words are the receiver and the return address (if a jump).
+ int extra_words = flag == CALL_FUNCTION ? 1 : 2;
+ lea(num, Operand(eax, extra_words)); // Number of words to copy.
+ Set(current, 0);
+ // Fall-through to the loop body because there are non-zero words to copy.
+ bind(&copy);
+ mov(scratch, Operand(src, current, times_system_pointer_size, 0));
+ mov(Operand(dest, current, times_system_pointer_size, 0), scratch);
+ inc(current);
+ bind(&check);
+ cmp(current, num);
+ j(less, &copy);
+ lea(edx, Operand(esp, num, times_system_pointer_size, 0));
+ }
+
+ // Fill remaining expected arguments with undefined values.
+ movd(ebx, xmm3); // Restore root.
+ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ {
+ Label loop;
+ bind(&loop);
+ dec(expected_parameter_count);
+ mov(Operand(edx, expected_parameter_count, times_system_pointer_size, 0),
+ scratch);
+ j(greater, &loop, Label::kNear);
+ }
+
+ // Restore remaining registers.
+ movd(esi, xmm2);
+ movd(edi, xmm1);
+ movd(edx, xmm0);
+
+ jmp(&regular_invoke);
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ int3(); // This should be unreachable.
+ }
+#else
cmp(expected_parameter_count, actual_parameter_count);
j(equal, &regular_invoke);
Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
@@ -1137,6 +1242,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
}
+#endif
bind(&regular_invoke);
}
}
@@ -1158,13 +1264,7 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Push(fun);
Push(fun);
// Arguments are located 2 words below the base pointer.
-#ifdef V8_REVERSE_JSARGS
Operand receiver_op = Operand(ebp, kSystemPointerSize * 2);
-#else
- Operand receiver_op =
- Operand(ebp, actual_parameter_count, times_system_pointer_size,
- kSystemPointerSize * 2);
-#endif
Push(receiver_op);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
@@ -1183,7 +1283,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register actual_parameter_count,
InvokeFlag flag) {
// You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
DCHECK_EQ(function, edi);
DCHECK_IMPLIES(new_target.is_valid(), new_target == edx);
DCHECK(expected_parameter_count == ecx || expected_parameter_count == eax);
@@ -1197,7 +1297,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
push(eax);
cmpb(ExternalReferenceAsOperand(debug_hook_active, eax), Immediate(0));
pop(eax);
- j(not_equal, &debug_hook, Label::kNear);
+ j(not_equal, &debug_hook);
}
bind(&continue_after_hook);
@@ -1225,7 +1325,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
bind(&debug_hook);
CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
actual_parameter_count);
- jmp(&continue_after_hook, Label::kNear);
+ jmp(&continue_after_hook);
bind(&done);
}
@@ -1501,15 +1601,20 @@ void TurboAssembler::Psignd(XMMRegister dst, Operand src) {
FATAL("no AVX or SSE3 support");
}
-void TurboAssembler::Pshufb(XMMRegister dst, Operand src) {
+void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src, Operand mask) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vpshufb(dst, dst, src);
+ vpshufb(dst, src, mask);
return;
}
if (CpuFeatures::IsSupported(SSSE3)) {
+ // Make sure these are different so that we won't overwrite mask.
+ DCHECK(!mask.is_reg(dst));
CpuFeatureScope sse_scope(this, SSSE3);
- pshufb(dst, src);
+ if (dst != src) {
+ movapd(dst, src);
+ }
+ pshufb(dst, mask);
return;
}
FATAL("no AVX or SSE3 support");
@@ -1884,8 +1989,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index)) {
// Inline the trampoline.
CallBuiltin(builtin_index);
return;
@@ -1988,8 +2092,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
@@ -2089,13 +2192,15 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label*) {
+ CallBuiltin(target);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
- NoRootArrayScope no_root_array(this);
- // Save the deopt id in ebx (we don't need the roots array from now on).
- mov(ebx, deopt_id);
- call(target, RelocInfo::RUNTIME_ENTRY);
}
void TurboAssembler::Trap() { int3(); }
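
The new StackOverflowCheck above relies on a sign trick: real_stack_limit - esp is negative while there is room, so after adding the space the pending arguments need, a positive result means the call would overflow. A standalone model of that arithmetic (plain C++ over hypothetical pointer values, not assembler):

#include <cstdint>

constexpr std::intptr_t kSystemPointerSize = 4;  // ia32

// Mirrors: scratch = limit - esp; scratch += num_args * word (+ receiver);
// overflow iff scratch > 0, i.e. the j(greater, stack_overflow) above.
bool WouldOverflowStack(std::intptr_t real_stack_limit, std::intptr_t esp,
                        std::intptr_t num_args, bool include_receiver) {
  std::intptr_t scratch = real_stack_limit - esp;  // negative while room is left
  scratch += num_args * kSystemPointerSize;
  if (include_receiver) scratch += kSystemPointerSize;
  return scratch > 0;
}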
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 72d574f14c..33635b09c5 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -24,6 +24,10 @@ using MemOperand = Operand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+// TODO(victorgomes): Move definition to macro-assembler.h, once all other
+// platforms are updated.
+enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
// Convenient class to access arguments below the stack pointer.
class StackArgumentsAccessor {
public:
@@ -130,8 +134,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Trap() override;
void DebugBreak() override;
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label,
@@ -479,8 +484,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef AVX_OP3_XO_SSE4
#undef AVX_OP3_WITH_TYPE_SCOPE
- void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
- void Pshufb(XMMRegister dst, Operand src);
+ void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, dst, src); }
+ void Pshufb(XMMRegister dst, Operand src) { Pshufb(dst, dst, src); }
+ // Handles SSE and AVX. On SSE, moves src to dst if they are not equal.
+ void Pshufb(XMMRegister dst, XMMRegister src, XMMRegister mask) {
+ Pshufb(dst, src, Operand(mask));
+ }
+ void Pshufb(XMMRegister dst, XMMRegister src, Operand mask);
void Pblendw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
Pblendw(dst, Operand(src), imm8);
}
@@ -834,6 +844,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void IncrementCounter(StatsCounter* counter, int value, Register scratch);
void DecrementCounter(StatsCounter* counter, int value, Register scratch);
+ // ---------------------------------------------------------------------------
+ // Stack limit utilities
+ void CompareStackLimit(Register with, StackLimitKind kind);
+ void StackOverflowCheck(Register num_args, Register scratch,
+ Label* stack_overflow, bool include_receiver = false);
+
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
@@ -854,7 +870,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class StandardFrame;
+ friend class CommonFrame;
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
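
The three-operand Pshufb overloads above only change register allocation: AVX's vpshufb is non-destructive, while the SSSE3 form is destructive, so src is first moved into dst and the mask must not alias dst. The byte shuffle itself is unchanged; a small model of what [v]pshufb computes per byte (each output byte indexes into src, or is zeroed when the mask byte's top bit is set):

#include <array>
#include <cstdint>

std::array<std::uint8_t, 16> PshufbModel(
    const std::array<std::uint8_t, 16>& src,
    const std::array<std::uint8_t, 16>& mask) {
  std::array<std::uint8_t, 16> dst{};
  for (int i = 0; i < 16; ++i) {
    // Top bit of the mask byte zeroes the lane; low four bits select a byte.
    dst[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 0x0F];
  }
  return dst;
}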
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index 8a6235fa08..79dad84077 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -446,5 +446,45 @@ void BigIntToI32PairDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, kParameterCount);
}
+void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 3);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index d307502276..f086f23960 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -111,8 +111,7 @@ enum class StackArgumentOrder {
kJS, // Arguments in the stack are pushed in the same order as the one used
// by JS-to-JS function calls. This should be used if calling a
// JSFunction or if the builtin is expected to be called directly from a
- // JSFunction. When V8_REVERSE_JSARGS is set, this order is reversed
- // compared to kDefault.
+ // JSFunction. This order is reversed compared to kDefault.
};
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
@@ -506,9 +505,7 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
##__VA_ARGS__)
// When the extra arguments described here are located in the stack, they are
-// just above the return address in the frame. Therefore, they are either the
-// first arguments when V8_REVERSE_JSARGS is enabled, or otherwise the last
-// arguments.
+// just above the return address in the frame (first arguments).
#define DEFINE_JS_PARAMETERS(...) \
static constexpr int kDescriptorFlags = \
CallInterfaceDescriptorData::kAllowVarArgs; \
@@ -596,6 +593,12 @@ using DummyDescriptor = VoidDescriptor;
// Dummy descriptor that marks builtins with C calling convention.
using CCallDescriptor = VoidDescriptor;
+// Marks deoptimization entry builtins. Precise calling conventions currently
+// differ based on the platform.
+// TODO(jgruber): Once this is unified, we could create a better description
+// here.
+using DeoptimizationEntryDescriptor = VoidDescriptor;
+
class AllocateDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kRequestedSize)
@@ -1156,7 +1159,6 @@ class ArrayNoArgumentConstructorDescriptor
ArrayNArgumentsConstructorDescriptor)
};
-#ifdef V8_REVERSE_JSARGS
class ArraySingleArgumentConstructorDescriptor
: public ArrayNArgumentsConstructorDescriptor {
public:
@@ -1174,25 +1176,6 @@ class ArraySingleArgumentConstructorDescriptor
DECLARE_DESCRIPTOR(ArraySingleArgumentConstructorDescriptor,
ArrayNArgumentsConstructorDescriptor)
};
-#else
-class ArraySingleArgumentConstructorDescriptor
- : public ArrayNArgumentsConstructorDescriptor {
- public:
- // This descriptor declares same register arguments as the parent
- // ArrayNArgumentsConstructorDescriptor and it declares indices for
- // JS arguments passed on the expression stack.
- DEFINE_PARAMETERS(kFunction, kAllocationSite, kActualArgumentsCount,
- kReceiverParameter, kArraySizeSmiParameter)
- DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
- MachineType::AnyTagged(), // kAllocationSite
- MachineType::Int32(), // kActualArgumentsCount
- // JS arguments on the stack
- MachineType::AnyTagged(), // kReceiverParameter
- MachineType::AnyTagged()) // kArraySizeSmiParameter
- DECLARE_DESCRIPTOR(ArraySingleArgumentConstructorDescriptor,
- ArrayNArgumentsConstructorDescriptor)
-};
-#endif
class CompareDescriptor : public CallInterfaceDescriptor {
public:
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index 19a514b2d9..df46577db7 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -307,6 +307,15 @@ Assembler::Assembler(const AssemblerOptions& options,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyway.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
EmitForbiddenSlotInstruction();
int code_comments_size = WriteCodeComments();
@@ -3550,6 +3559,7 @@ void Assembler::GrowBuffer() {
buffer_ = std::move(new_buffer);
buffer_start_ = new_start;
pc_ += pc_delta;
+ last_call_pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
diff --git a/deps/v8/src/codegen/mips/interface-descriptors-mips.cc b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
index 132811a173..75835e607c 100644
--- a/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
@@ -304,54 +304,6 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 3);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index f9a0f7f076..37a6acadfe 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -2087,11 +2087,15 @@ void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
int32_t kFloat32MantissaBits = 23;
int32_t kFloat32ExponentBits = 8;
Label done;
+ if (!IsDoubleZeroRegSet()) {
+ Move(kDoubleRegZero, 0.0);
+ }
mfc1(scratch, src);
Ext(at, scratch, kFloat32MantissaBits, kFloat32ExponentBits);
Branch(USE_DELAY_SLOT, &done, hs, at,
Operand(kFloat32ExponentBias + kFloat32MantissaBits));
- mov_s(dst, src);
+ // Canonicalize the result.
+ sub_s(dst, src, kDoubleRegZero);
round(this, dst, src);
mfc1(at, dst);
Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
@@ -4110,9 +4114,19 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
}
void TurboAssembler::DropAndRet(int drop) {
- DCHECK(is_int16(drop * kPointerSize));
- Ret(USE_DELAY_SLOT);
- addiu(sp, sp, drop * kPointerSize);
+ int32_t drop_size = drop * kSystemPointerSize;
+ DCHECK(is_int31(drop_size));
+
+ if (is_int16(drop_size)) {
+ Ret(USE_DELAY_SLOT);
+ addiu(sp, sp, drop_size);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, drop_size);
+ Ret(USE_DELAY_SLOT);
+ addu(sp, sp, scratch);
+ }
}
void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
@@ -4373,23 +4387,107 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
mov(sp, dst_reg);
}
+void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ CHECK(is_int32(offset));
+ Lw(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
+}
+
+void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+
+ LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
+ // Make scratch1 the space we have left. The stack might already have
+ // overflowed here, which will cause scratch1 to become negative.
+ subu(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ sll(scratch2, num_args, kPointerSizeLog2);
+ // Signed comparison.
+ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+}
+
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeFlag flag) {
Label regular_invoke;
- // Check whether the expected and actual arguments count match. The
- // registers are set up according to contract with
- // ArgumentsAdaptorTrampoline:
// a0: actual arguments count
// a1: function (passed through to callee)
// a2: expected arguments count
- // The code below is made a lot easier because the calling code already sets
- // up actual and expected registers according to the contract.
DCHECK_EQ(actual_parameter_count, a0);
DCHECK_EQ(expected_parameter_count, a2);
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // If the expected parameter count is equal to the adaptor sentinel, no need
+ // to push undefined value as arguments.
+ Branch(&regular_invoke, eq, expected_parameter_count,
+ Operand(kDontAdaptArgumentsSentinel));
+
+ // If overapplication or if the actual argument count is equal to the
+ // formal parameter count, no need to push extra undefined values.
+ Subu(expected_parameter_count, expected_parameter_count,
+ actual_parameter_count);
+ Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
+
+ Label stack_overflow;
+ StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow);
+ // Underapplication. Move the arguments already in the stack, including the
+ // receiver and the return address.
+ {
+ Label copy;
+ Register src = t3, dest = t4;
+ mov(src, sp);
+ sll(t0, expected_parameter_count, kSystemPointerSizeLog2);
+ Subu(sp, sp, Operand(t0));
+ // Update stack pointer.
+ mov(dest, sp);
+ mov(t0, a0);
+ bind(&copy);
+ Lw(t1, MemOperand(src, 0));
+ Sw(t1, MemOperand(dest, 0));
+ Subu(t0, t0, Operand(1));
+ Addu(src, src, Operand(kSystemPointerSize));
+ Addu(dest, dest, Operand(kSystemPointerSize));
+ Branch(&copy, ge, t0, Operand(zero_reg));
+ }
+
+ // Fill remaining expected arguments with undefined values.
+ LoadRoot(t0, RootIndex::kUndefinedValue);
+ {
+ Label loop;
+ bind(&loop);
+ Sw(t0, MemOperand(t4, 0));
+ Subu(expected_parameter_count, expected_parameter_count, Operand(1));
+ Addu(t4, t4, Operand(kSystemPointerSize));
+ Branch(&loop, gt, expected_parameter_count, Operand(zero_reg));
+ }
+ b(&regular_invoke);
+ nop();
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ break_(0xCC);
+ }
+#else
+ // Check whether the expected and actual arguments count match. The registers
+ // are set up according to contract with ArgumentsAdaptorTrampoline:
Branch(&regular_invoke, eq, expected_parameter_count,
Operand(actual_parameter_count));
@@ -4400,7 +4498,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
}
-
+#endif
bind(&regular_invoke);
}
@@ -5508,17 +5606,18 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label*) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Lw(t9,
+ MemOperand(kRootRegister, IsolateData::builtin_entry_slot_offset(target)));
+ Call(t9);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
- NoRootArrayScope no_root_array(this);
-
- // Save the deipt id in kRootRegister (we don't need the roots array from now
- // on).
- DCHECK_LE(deopt_id, 0xFFFF);
- li(kRootRegister, deopt_id);
-
- Call(target, RelocInfo::RUNTIME_ENTRY);
}
} // namespace internal
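
The ia32 and MIPS InvokePrologue changes above implement the same V8_NO_ARGUMENTS_ADAPTOR idea: when a function receives fewer arguments than it declares, the words already on the stack (return address, receiver, actual arguments) are slid down and the missing formals are filled with undefined, instead of routing through ArgumentsAdaptorTrampoline. A high-level model of that reshuffle on a plain vector, with the stack top at index 0 and stand-in values for the sentinel and undefined (not V8 constants):

#include <cstdint>
#include <vector>

constexpr std::intptr_t kUndefinedStandIn = -1;    // stand-in for undefined
constexpr int kDontAdaptSentinelStandIn = 0xFFFF;  // illustrative sentinel

void AdaptStackInPlace(std::vector<std::intptr_t>* stack,  // top at index 0
                       int actual_count, int expected_count,
                       int extra_words /* receiver (+ return address) */) {
  if (expected_count == kDontAdaptSentinelStandIn) return;
  int missing = expected_count - actual_count;
  if (missing <= 0) return;  // overapplication: nothing to pad

  // Grow the stack by `missing` words and slide the existing top words
  // (return address/receiver plus the actual arguments) down to the new top.
  stack->insert(stack->begin(), missing, 0);
  for (int i = 0; i < actual_count + extra_words; ++i) {
    (*stack)[i] = (*stack)[i + missing];
  }
  // Fill the freed slots with undefined.
  for (int i = 0; i < missing; ++i) {
    (*stack)[actual_count + extra_words + i] = kUndefinedStandIn;
  }
}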
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index cafcc42941..d91a4a7bb8 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -237,8 +237,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
@@ -252,8 +253,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
const Operand& op = Operand(no_reg));
- // Trivial case of DropAndRet that utilizes the delay slot and only emits
- // 2 instructions.
+ // Trivial case of DropAndRet that utilizes the delay slot.
void DropAndRet(int drop);
void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
@@ -914,21 +914,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// TODO(victorgomes): Remove this function once we stick with the reversed
// arguments order.
void LoadReceiver(Register dest, Register argc) {
-#ifdef V8_REVERSE_JSARGS
Lw(dest, MemOperand(sp, 0));
-#else
- Lsa(dest, sp, argc, kPointerSizeLog2);
- Lw(dest, MemOperand(dest, 0));
-#endif
}
void StoreReceiver(Register rec, Register argc, Register scratch) {
-#ifdef V8_REVERSE_JSARGS
Sw(rec, MemOperand(sp, 0));
-#else
- Lsa(scratch, sp, argc, kPointerSizeLog2);
- Sw(rec, MemOperand(scratch, 0));
-#endif
}
// Swap two registers. If the scratch register is omitted then a slightly
@@ -1106,6 +1096,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register scratch2);
// -------------------------------------------------------------------------
+ // Stack limit utilities
+
+ enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+ void LoadStackLimit(Register destination, StackLimitKind kind);
+ void StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2, Label* stack_overflow);
+
+ // ---------------------------------------------------------------------------
// Smi utilities.
void SmiTag(Register reg) { Addu(reg, reg, reg); }
@@ -1164,7 +1162,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class StandardFrame;
+ friend class CommonFrame;
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index b64005155d..3b16805f53 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -283,6 +283,15 @@ Assembler::Assembler(const AssemblerOptions& options,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyway.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
EmitForbiddenSlotInstruction();
int code_comments_size = WriteCodeComments();
@@ -869,8 +878,10 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
Instr instr_branch_delay;
if (IsJump(instr_j)) {
- instr_branch_delay = instr_at(pos + 6 * kInstrSize);
+ // Case when branch delay slot is protected.
+ instr_branch_delay = nopInstr;
} else {
+ // Case when branch delay slot is used.
instr_branch_delay = instr_at(pos + 7 * kInstrSize);
}
instr_at_put(pos, instr_b);
@@ -3746,6 +3757,7 @@ void Assembler::GrowBuffer() {
buffer_ = std::move(new_buffer);
buffer_start_ = new_start;
pc_ += pc_delta;
+ last_call_pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
diff --git a/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
index 4014607007..f77d8d4130 100644
--- a/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
@@ -304,54 +304,6 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 3);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 509153e6c2..249fc9126b 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -2509,11 +2509,15 @@ void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
ctc1(scratch, FCSR);
} else {
Label done;
+ if (!IsDoubleZeroRegSet()) {
+ Move(kDoubleRegZero, 0.0);
+ }
mfhc1(scratch, src);
Ext(at, scratch, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
Branch(USE_DELAY_SLOT, &done, hs, at,
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits));
- mov_d(dst, src);
+ // Canonicalize the result.
+ sub_d(dst, src, kDoubleRegZero);
round(this, dst, src);
dmfc1(at, dst);
Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
@@ -2569,11 +2573,15 @@ void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
int32_t kFloat32MantissaBits = 23;
int32_t kFloat32ExponentBits = 8;
Label done;
+ if (!IsDoubleZeroRegSet()) {
+ Move(kDoubleRegZero, 0.0);
+ }
mfc1(scratch, src);
Ext(at, scratch, kFloat32MantissaBits, kFloat32ExponentBits);
Branch(USE_DELAY_SLOT, &done, hs, at,
Operand(kFloat32ExponentBias + kFloat32MantissaBits));
- mov_s(dst, src);
+ // Canonicalize the result.
+ sub_s(dst, src, kDoubleRegZero);
round(this, dst, src);
mfc1(at, dst);
Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
@@ -4448,9 +4456,19 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
}
void TurboAssembler::DropAndRet(int drop) {
- DCHECK(is_int16(drop * kPointerSize));
- Ret(USE_DELAY_SLOT);
- daddiu(sp, sp, drop * kPointerSize);
+ int32_t drop_size = drop * kSystemPointerSize;
+ DCHECK(is_int31(drop_size));
+
+ if (is_int16(drop_size)) {
+ Ret(USE_DELAY_SLOT);
+ daddiu(sp, sp, drop_size);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, drop_size);
+ Ret(USE_DELAY_SLOT);
+ daddu(sp, sp, scratch);
+ }
}
void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
@@ -4714,23 +4732,108 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
mov(sp, dst_reg);
}
+void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ CHECK(is_int32(offset));
+ Ld(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
+}
+
+void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+
+ LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
+ // Make scratch1 the space we have left. The stack might already have
+ // overflowed here, which will cause scratch1 to become negative.
+ dsubu(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ dsll(scratch2, num_args, kPointerSizeLog2);
+ // Signed comparison.
+ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+}
+
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeFlag flag) {
Label regular_invoke;
- // Check whether the expected and actual arguments count match. The registers
- // are set up according to contract with ArgumentsAdaptorTrampoline:
// a0: actual arguments count
// a1: function (passed through to callee)
// a2: expected arguments count
- // The code below is made a lot easier because the calling code already sets
- // up actual and expected registers according to the contract.
-
DCHECK_EQ(actual_parameter_count, a0);
DCHECK_EQ(expected_parameter_count, a2);
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // If the expected parameter count is equal to the adaptor sentinel, no need
+ // to push undefined value as arguments.
+ Branch(&regular_invoke, eq, expected_parameter_count,
+ Operand(kDontAdaptArgumentsSentinel));
+
+ // If overapplication or if the actual argument count is equal to the
+ // formal parameter count, no need to push extra undefined values.
+ Dsubu(expected_parameter_count, expected_parameter_count,
+ actual_parameter_count);
+ Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
+
+ Label stack_overflow;
+ StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow);
+ // Underapplication. Move the arguments already in the stack, including the
+ // receiver and the return address.
+ {
+ Label copy;
+ Register src = a6, dest = a7;
+ mov(src, sp);
+ dsll(t0, expected_parameter_count, kSystemPointerSizeLog2);
+ Dsubu(sp, sp, Operand(t0));
+ // Update stack pointer.
+ mov(dest, sp);
+ mov(t0, actual_parameter_count);
+ bind(&copy);
+ Ld(t1, MemOperand(src, 0));
+ Sd(t1, MemOperand(dest, 0));
+ Dsubu(t0, t0, Operand(1));
+ Daddu(src, src, Operand(kSystemPointerSize));
+ Daddu(dest, dest, Operand(kSystemPointerSize));
+ Branch(&copy, ge, t0, Operand(zero_reg));
+ }
+
+ // Fill remaining expected arguments with undefined values.
+ LoadRoot(t0, RootIndex::kUndefinedValue);
+ {
+ Label loop;
+ bind(&loop);
+ Sd(t0, MemOperand(a7, 0));
+ Dsubu(expected_parameter_count, expected_parameter_count, Operand(1));
+ Daddu(a7, a7, Operand(kSystemPointerSize));
+ Branch(&loop, gt, expected_parameter_count, Operand(zero_reg));
+ }
+ b(&regular_invoke);
+ nop();
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ break_(0xCC);
+ }
+#else
+ // Check whether the expected and actual arguments count match. The registers
+ // are set up according to contract with ArgumentsAdaptorTrampoline:
+
Branch(&regular_invoke, eq, expected_parameter_count,
Operand(actual_parameter_count));
@@ -4741,7 +4844,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
}
-
+#endif
bind(&regular_invoke);
}
@@ -5864,16 +5967,18 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label*) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ld(t9,
+ MemOperand(kRootRegister, IsolateData::builtin_entry_slot_offset(target)));
+ Call(t9);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
- NoRootArrayScope no_root_array(this);
-
- // Save the deopt id in kRootRegister (we don't need the roots array from now
- // on).
- DCHECK_LE(deopt_id, 0xFFFF);
- li(kRootRegister, deopt_id);
- Call(target, RelocInfo::RUNTIME_ENTRY);
}
} // namespace internal
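
The CallForDeoptimization rewrites above (ia32 and both MIPS ports) stop embedding a deopt id and a RUNTIME_ENTRY address; instead they load the deoptimization builtin's entry point from the builtin entry table reachable at a fixed offset from kRootRegister, call it, and assert the generated exit size. A conceptual stand-in for that indirection with a hypothetical table (not IsolateData):

#include <array>
#include <cstddef>
#include <cstdio>

using BuiltinEntry = void (*)();

void LazyDeoptStub() { std::puts("lazy deopt entry"); }
void NonLazyDeoptStub() { std::puts("non-lazy deopt entry"); }

// Stand-in for the per-isolate table the root register points into.
struct IsolateDataStandIn {
  std::array<BuiltinEntry, 2> builtin_entry_table{LazyDeoptStub,
                                                  NonLazyDeoptStub};
  static constexpr std::size_t EntrySlotOffset(std::size_t builtin_index) {
    return builtin_index;  // the real code computes a byte offset
  }
};

void CallForDeoptimization(IsolateDataStandIn& data, std::size_t target) {
  // Load the entry out of the table and call it, instead of encoding an
  // absolute runtime-entry address into the generated code.
  BuiltinEntry entry =
      data.builtin_entry_table[IsolateDataStandIn::EntrySlotOffset(target)];
  entry();
}

int main() {
  IsolateDataStandIn data;
  CallForDeoptimization(data, 0);  // lazy
  CallForDeoptimization(data, 1);  // non-lazy
}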
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index 56380cc8b2..a0d5e59bf0 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -260,8 +260,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
@@ -275,8 +276,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
const Operand& op = Operand(no_reg));
- // Trivial case of DropAndRet that utilizes the delay slot and only emits
- // 2 instructions.
+ // Trivial case of DropAndRet that utilizes the delay slot.
void DropAndRet(int drop);
void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
@@ -921,21 +921,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// TODO(victorgomes): Remove this function once we stick with the reversed
// arguments order.
void LoadReceiver(Register dest, Register argc) {
-#ifdef V8_REVERSE_JSARGS
Ld(dest, MemOperand(sp, 0));
-#else
- Dlsa(dest, sp, argc, kPointerSizeLog2);
- Ld(dest, MemOperand(dest, 0));
-#endif
}
void StoreReceiver(Register rec, Register argc, Register scratch) {
-#ifdef V8_REVERSE_JSARGS
Sd(rec, MemOperand(sp, 0));
-#else
- Dlsa(scratch, sp, argc, kPointerSizeLog2);
- Sd(rec, MemOperand(scratch, 0));
-#endif
}
bool IsNear(Label* L, Condition cond, int rs_reg);
@@ -1150,6 +1140,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register scratch2);
// -------------------------------------------------------------------------
+ // Stack limit utilities
+
+ enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+ void LoadStackLimit(Register destination, StackLimitKind kind);
+ void StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2, Label* stack_overflow);
+
+ // ---------------------------------------------------------------------------
// Smi utilities.
void SmiTag(Register dst, Register src) {
@@ -1228,7 +1226,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class StandardFrame;
+ friend class CommonFrame;
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index b2c100aa05..bf45a5f38b 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -80,11 +80,12 @@ void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations();
switch (code_kind_) {
- case CodeKind::OPTIMIZED_FUNCTION:
+ case CodeKind::TURBOFAN:
if (FLAG_function_context_specialization) {
set_function_context_specializing();
}
V8_FALLTHROUGH;
+ case CodeKind::TURBOPROP:
case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
set_called_with_code_start_register();
set_switch_jump_table();
@@ -98,7 +99,7 @@ void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_turbo_splitting) set_splitting();
break;
case CodeKind::BUILTIN:
- case CodeKind::STUB:
+ case CodeKind::FOR_TESTING:
if (FLAG_turbo_splitting) set_splitting();
#if ENABLE_GDB_JIT_INTERFACE && DEBUG
set_source_positions();
@@ -160,7 +161,7 @@ std::unique_ptr<char[]> OptimizedCompilationInfo::GetDebugName() const {
StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
switch (code_kind()) {
- case CodeKind::STUB:
+ case CodeKind::FOR_TESTING:
case CodeKind::BYTECODE_HANDLER:
case CodeKind::BUILTIN:
return StackFrame::STUB;
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index 4de8ba1645..6e238d6239 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -149,7 +149,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
bool IsNativeContextIndependent() const {
return code_kind() == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
}
- bool IsStub() const { return code_kind() == CodeKind::STUB; }
+ bool IsTurboprop() const { return code_kind() == CodeKind::TURBOPROP; }
bool IsWasm() const { return code_kind() == CodeKind::WASM_FUNCTION; }
void SetOptimizingForOsr(BailoutId osr_offset, JavaScriptFrame* osr_frame) {
@@ -299,11 +299,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// 1) PersistentHandles created via PersistentHandlesScope inside of
// CompilationHandleScope
// 2) Owned by OptimizedCompilationInfo
- // 3) Owned by JSHeapBroker
- // 4) Owned by the broker's LocalHeap
- // 5) Back to the broker for a brief moment (after tearing down the
- // LocalHeap as part of exiting LocalHeapScope)
- // 6) Back to OptimizedCompilationInfo when exiting the LocalHeapScope.
+ // 3) Owned by the broker's LocalHeap when entering the LocalHeapScope.
+ // 4) Back to OptimizedCompilationInfo when exiting the LocalHeapScope.
//
// In normal execution it gets destroyed when PipelineData gets destroyed.
// There is a special case in GenerateCodeForTesting where the JSHeapBroker
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 37a53b49f2..54136a9f2b 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -248,6 +248,15 @@ Assembler::Assembler(const AssemblerOptions& options,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyways.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
// Emit constant pool if necessary.
int constant_pool_size = EmitConstantPool();
@@ -1777,6 +1786,12 @@ void Assembler::mtvsrd(const Simd128Register rt, const Register ra) {
emit(MTVSRD | rt.code() * B21 | ra.code() * B16 | TX);
}
+void Assembler::mtvsrdd(const Simd128Register rt, const Register ra,
+ const Register rb) {
+ int TX = 1;
+ emit(MTVSRDD | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | TX);
+}
+
void Assembler::lxvd(const Simd128Register rt, const MemOperand& src) {
int TX = 1;
emit(LXVD | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
@@ -1789,6 +1804,11 @@ void Assembler::stxvd(const Simd128Register rt, const MemOperand& dst) {
SX);
}
+void Assembler::xxspltib(const Simd128Register rt, const Operand& imm) {
+ int TX = 1;
+ emit(XXSPLTIB | rt.code() * B21 | imm.immediate() * B11 | TX);
+}
+
// Pseudo instructions.
void Assembler::nop(int type) {
Register reg = r0;
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index f26a3c89c9..11497c90ce 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -1019,8 +1019,10 @@ class Assembler : public AssemblerBase {
void mfvsrd(const Register ra, const Simd128Register r);
void mfvsrwz(const Register ra, const Simd128Register r);
void mtvsrd(const Simd128Register rt, const Register ra);
+ void mtvsrdd(const Simd128Register rt, const Register ra, const Register rb);
void lxvd(const Simd128Register rt, const MemOperand& src);
void stxvd(const Simd128Register rt, const MemOperand& src);
+ void xxspltib(const Simd128Register rt, const Operand& imm);
// Pseudo instructions
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index 306175e06d..f71d1beae3 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -414,6 +414,8 @@ using Instr = uint32_t;
V(xssqrtsp, XSSQRTSP, 0xF000002C) \
/* Move To VSR Doubleword */ \
V(mtvsrd, MTVSRD, 0x7C000166) \
+ /* Move To VSR Double Doubleword */ \
+ V(mtvsrdd, MTVSRDD, 0x7C000366) \
/* Move To VSR Word Algebraic */ \
V(mtvsrwa, MTVSRWA, 0x7C0001A6) \
/* Move To VSR Word and Zero */ \
@@ -1930,7 +1932,9 @@ using Instr = uint32_t;
/* Vector Multiply-Low-Add Unsigned Halfword Modulo */ \
V(vmladduhm, VMLADDUHM, 0x10000022) \
/* Vector Select */ \
- V(vsel, VSEL, 0x1000002A)
+ V(vsel, VSEL, 0x1000002A) \
+ /* Vector Multiply-Sum Signed Halfword Modulo */ \
+ V(vmsumshm, VMSUMSHM, 0x10000028)
#define PPC_VA_OPCODE_UNUSED_LIST(V) \
/* Vector Add Extended & write Carry Unsigned Quadword */ \
@@ -1945,8 +1949,6 @@ using Instr = uint32_t;
V(vmhraddshs, VMHRADDSHS, 0x10000021) \
/* Vector Multiply-Sum Mixed Byte Modulo */ \
V(vmsummbm, VMSUMMBM, 0x10000025) \
- /* Vector Multiply-Sum Signed Halfword Modulo */ \
- V(vmsumshm, VMSUMSHM, 0x10000028) \
/* Vector Multiply-Sum Signed Halfword Saturate */ \
V(vmsumshs, VMSUMSHS, 0x10000029) \
/* Vector Multiply-Sum Unsigned Byte Modulo */ \
@@ -1998,7 +2000,9 @@ using Instr = uint32_t;
/* Store VSR Vector Doubleword*2 Indexed */ \
V(stxvd, STXVD, 0x7C000798) \
/* Store VSR Vector Word*4 Indexed */ \
- V(stxvw, STXVW, 0x7C000718)
+ V(stxvw, STXVW, 0x7C000718) \
+ /* Vector Splat Immediate Byte */ \
+ V(xxspltib, XXSPLTIB, 0xF00002D1)
#define PPC_B_OPCODE_LIST(V) \
/* Branch Conditional */ \
@@ -2202,13 +2206,29 @@ using Instr = uint32_t;
/* Rotate Left Word then AND with Mask */ \
V(rlwnm, RLWNMX, 0x5C000000)
-#define PPC_VX_OPCODE_A_FORM_LIST(V) \
- /* Vector Splat Byte */ \
- V(vspltb, VSPLTB, 0x1000020C) \
- /* Vector Splat Word */ \
- V(vspltw, VSPLTW, 0x1000028C) \
- /* Vector Splat Halfword */ \
- V(vsplth, VSPLTH, 0x1000024C)
+#define PPC_VX_OPCODE_A_FORM_LIST(V) \
+ /* Vector Splat Byte */ \
+ V(vspltb, VSPLTB, 0x1000020C) \
+ /* Vector Splat Word */ \
+ V(vspltw, VSPLTW, 0x1000028C) \
+ /* Vector Splat Halfword */ \
+ V(vsplth, VSPLTH, 0x1000024C) \
+ /* Vector Extract Unsigned Byte */ \
+ V(vextractub, VEXTRACTUB, 0x1000020D) \
+ /* Vector Extract Unsigned Halfword */ \
+ V(vextractuh, VEXTRACTUH, 0x1000024D) \
+ /* Vector Extract Unsigned Word */ \
+ V(vextractuw, VEXTRACTUW, 0x1000028D) \
+ /* Vector Extract Doubleword */ \
+ V(vextractd, VEXTRACTD, 0x100002CD) \
+ /* Vector Insert Byte */ \
+ V(vinsertb, VINSERTB, 0x1000030D) \
+ /* Vector Insert Halfword */ \
+ V(vinserth, VINSERTH, 0x1000034D) \
+ /* Vector Insert Word */ \
+ V(vinsertw, VINSERTW, 0x1000038D) \
+ /* Vector Insert Doubleword */ \
+ V(vinsertd, VINSERTD, 0x100003CD)
#define PPC_VX_OPCODE_B_FORM_LIST(V) \
/* Vector Logical OR */ \
@@ -2348,7 +2368,9 @@ using Instr = uint32_t;
/* Vector Minimum Single-Precision */ \
V(vminfp, VMINFP, 0x1000044A) \
/* Vector Maximum Single-Precision */ \
- V(vmaxfp, VMAXFP, 0x1000040A)
+ V(vmaxfp, VMAXFP, 0x1000040A) \
+ /* Vector Bit Permute Quadword */ \
+ V(vbpermq, VBPERMQ, 0x1000054C)
#define PPC_VX_OPCODE_C_FORM_LIST(V) \
/* Vector Unpack Low Signed Halfword */ \
@@ -2387,8 +2409,6 @@ using Instr = uint32_t;
V(vavgsw, VAVGSW, 0x10000582) \
/* Vector Average Unsigned Word */ \
V(vavguw, VAVGUW, 0x10000482) \
- /* Vector Bit Permute Quadword */ \
- V(vbpermq, VBPERMQ, 0x1000054C) \
/* Vector Convert From Signed Fixed-Point Word To Single-Precision */ \
V(vcfsx, VCFSX, 0x1000034A) \
/* Vector Convert From Unsigned Fixed-Point Word To Single-Precision */ \
diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
index 4d68e01285..3c2d92237d 100644
--- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
@@ -278,54 +278,6 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 3);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 4a526384e0..08955805e6 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -173,9 +173,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
+ bool target_is_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
if (root_array_available_ && options().isolate_independent_code) {
Label skip;
@@ -187,8 +186,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(scratch);
bind(&skip);
return;
- } else if (options().inline_offheap_trampolines &&
- target_is_isolate_independent_builtin) {
+ } else if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
Label skip;
RecordCommentForOffHeapTrampoline(builtin_index);
@@ -264,9 +262,8 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
+ bool target_is_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
if (root_array_available_ && options().isolate_independent_code) {
Label skip;
@@ -277,8 +274,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(ip);
bind(&skip);
return;
- } else if (options().inline_offheap_trampolines &&
- target_is_isolate_independent_builtin) {
+ } else if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
@@ -1057,10 +1053,16 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
+ // Builtins do not use the constant pool (see is_constant_pool_available).
+ STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
+
+ lwz(r0, MemOperand(code_target_address,
+ Code::kInstructionSizeOffset - Code::kHeaderSize));
lwz(kConstantPoolRegister,
MemOperand(code_target_address,
Code::kConstantPoolOffsetOffset - Code::kHeaderSize));
add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
+ add(kConstantPoolRegister, kConstantPoolRegister, r0);
}
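// Annotation (assumption, not part of the patch): with a contiguous on-heap
// code body the stored constant pool offset is relative to the metadata area
// that follows the instructions, so the absolute address is reconstructed as
//   constant_pool = code_entry + instruction_size + constant_pool_offset
// which is why the instruction size is now loaded and added as well.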
void TurboAssembler::LoadPC(Register dst) {
@@ -1076,6 +1078,10 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
void TurboAssembler::LoadConstantPoolPointerRegister() {
+ //
+ // Builtins do not use the constant pool (see is_constant_pool_available).
+ STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
+
LoadPC(kConstantPoolRegister);
int32_t delta = -pc_offset() + 4;
add_label_offset(kConstantPoolRegister, kConstantPoolRegister,
@@ -3251,16 +3257,17 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
SizeOfCodeGeneratedSince(&start_call));
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label*) {
+ LoadP(ip, MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(target)));
+ Call(ip);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
- NoRootArrayScope no_root_array(this);
-
- // Save the deopt id in r29 (we don't need the roots array from now on).
- DCHECK_LE(deopt_id, 0xFFFF);
-
- mov(r29, Operand(deopt_id));
- Call(target, RelocInfo::RUNTIME_ENTRY);
}
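// Annotation (hedged sketch): deoptimization exits now call the target
// deoptimization builtin through its entry slot off kRootRegister instead of
// a runtime entry tagged with a deopt id, i.e. conceptually
//   entry = *(kRootRegister + builtin_entry_slot_offset(target));
//   Call(entry);  // exit size must match Deoptimizer::k{Lazy,NonLazy}DeoptExitSize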
void TurboAssembler::ZeroExtByte(Register dst, Register src) {
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index a74985cbe1..db0d6857ac 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -441,8 +441,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void JumpCodeObject(Register code_object) override;
void CallBuiltinByIndex(Register builtin_index) override;
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -728,21 +729,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// TODO(victorgomes): Remove this function once we stick with the reversed
// arguments order.
void LoadReceiver(Register dest, Register argc) {
-#ifdef V8_REVERSE_JSARGS
LoadP(dest, MemOperand(sp, 0));
-#else
- ShiftLeftImm(dest, argc, Operand(kSystemPointerSizeLog2));
- LoadPX(dest, MemOperand(sp, dest));
-#endif
}
void StoreReceiver(Register rec, Register argc, Register scratch) {
-#ifdef V8_REVERSE_JSARGS
StoreP(rec, MemOperand(sp, 0));
-#else
- ShiftLeftImm(scratch, argc, Operand(kSystemPointerSizeLog2));
- StorePX(rec, MemOperand(sp, scratch));
-#endif
}
// ---------------------------------------------------------------------------
diff --git a/deps/v8/src/codegen/ppc/register-ppc.h b/deps/v8/src/codegen/ppc/register-ppc.h
index eded9622c4..925dc355a7 100644
--- a/deps/v8/src/codegen/ppc/register-ppc.h
+++ b/deps/v8/src/codegen/ppc/register-ppc.h
@@ -228,7 +228,11 @@ class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
// d14: 0.0
// d15: scratch register.
static constexpr int kSizeInBytes = 8;
- inline static int NumRegisters();
+
+ // This function differs from kNumRegisters by returning the number of double
+ // registers supported by the current CPU, while kNumRegisters always returns
+ // 32.
+ inline static int SupportedRegisterCount();
private:
friend class RegisterBase;
diff --git a/deps/v8/src/codegen/register-configuration.cc b/deps/v8/src/codegen/register-configuration.cc
index 5752b46339..1c48303294 100644
--- a/deps/v8/src/codegen/register-configuration.cc
+++ b/deps/v8/src/codegen/register-configuration.cc
@@ -42,6 +42,8 @@ STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
Simd128Register::kNumRegisters);
+// Callers on architectures other than Arm expect this to be constant
+// between build and runtime. Avoid adding variability on other platforms.
static int get_num_allocatable_double_registers() {
return
#if V8_TARGET_ARCH_IA32
@@ -71,6 +73,8 @@ static int get_num_allocatable_double_registers() {
#undef REGISTER_COUNT
+// Callers on architectures other than Arm expect this to be constant
+// between build and runtime. Avoid adding variability on other platforms.
static const int* get_allocatable_double_codes() {
return
#if V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/codegen/register-configuration.h b/deps/v8/src/codegen/register-configuration.h
index 0521599734..cdf9ddae35 100644
--- a/deps/v8/src/codegen/register-configuration.h
+++ b/deps/v8/src/codegen/register-configuration.h
@@ -29,7 +29,7 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
static constexpr int kMaxGeneralRegisters = 32;
static constexpr int kMaxFPRegisters = 32;
static constexpr int kMaxRegisters =
- Max(kMaxFPRegisters, kMaxGeneralRegisters);
+ std::max(kMaxFPRegisters, kMaxGeneralRegisters);
// Default RegisterConfigurations for the target architecture.
static const RegisterConfiguration* Default();
@@ -57,6 +57,9 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
int num_allocatable_float_registers() const {
return num_allocatable_float_registers_;
}
+ // Caution: this value depends on the current cpu and may change between
+ // build and runtime. At the time of writing, the only architecture with a
+ // variable allocatable double register set is Arm.
int num_allocatable_double_registers() const {
return num_allocatable_double_registers_;
}
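  // Annotation (hedged): on Arm the allocatable double set is picked at
  // runtime (assumption: based on whether the VFP32DREGS CPU feature is
  // present), so a value captured at build time may not match the target.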
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index d984b1e917..7fdc2f374a 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -476,7 +476,7 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
os << " " << Builtins::name(code.builtin_index());
}
os << ") (" << reinterpret_cast<const void*>(target_address()) << ")";
- } else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
+ } else if (IsRuntimeEntry(rmode_)) {
// Deoptimization bailouts are stored as runtime entries.
DeoptimizeKind type;
if (Deoptimizer::IsDeoptimizationEntry(isolate, target_address(), &type)) {
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index d96bfd8b84..2e74f029d2 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -370,6 +370,15 @@ Assembler::Assembler(const AssemblerOptions& options,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyways.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
EmitRelocations();
int code_comments_size = WriteCodeComments();
diff --git a/deps/v8/src/codegen/s390/interface-descriptors-s390.cc b/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
index 6c56c19b5a..a848cdf27a 100644
--- a/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
@@ -278,54 +278,6 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 3);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 5c9fe62dd1..4f63543ad7 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -174,24 +174,17 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
-
- if (options().inline_offheap_trampolines &&
- target_is_isolate_independent_builtin) {
- Label skip;
- if (cond != al) {
- b(NegateCondition(cond), &skip, Label::kNear);
- }
+ bool target_is_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
+
+ if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- b(ip);
- bind(&skip);
+ b(cond, ip);
return;
}
jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
@@ -242,12 +235,10 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
+ bool target_is_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
- if (options().inline_offheap_trampolines &&
- target_is_isolate_independent_builtin) {
+ if (target_is_builtin && options().inline_offheap_trampolines) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
@@ -4540,15 +4531,17 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
bind(&return_label);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label*) {
+ LoadP(ip, MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(target)));
+ Call(ip);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
- NoRootArrayScope no_root_array(this);
-
- // Save the deopt id in r10 (we don't need the roots array from now on).
- DCHECK_LE(deopt_id, 0xFFFF);
- lghi(r10, Operand(deopt_id));
- Call(target, RelocInfo::RUNTIME_ENTRY);
}
void TurboAssembler::Trap() { stop(); }
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index f66be8c2ef..f81dfb503b 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -153,8 +153,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ret() { b(r14); }
void Ret(Condition cond) { b(cond, r14); }
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -1072,21 +1073,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// TODO(victorgomes): Remove this function once we stick with the reversed
// arguments order.
void LoadReceiver(Register dest, Register argc) {
-#ifdef V8_REVERSE_JSARGS
LoadP(dest, MemOperand(sp, 0));
-#else
- ShiftLeftP(dest, argc, Operand(kSystemPointerSizeLog2));
- LoadP(dest, MemOperand(sp, dest));
-#endif
}
void StoreReceiver(Register rec, Register argc, Register scratch) {
-#ifdef V8_REVERSE_JSARGS
StoreP(rec, MemOperand(sp, 0));
-#else
- ShiftLeftP(scratch, argc, Operand(kSystemPointerSizeLog2));
- StoreP(rec, MemOperand(sp, scratch));
-#endif
}
void CallRuntime(const Runtime::Function* f, int num_arguments,
diff --git a/deps/v8/src/codegen/s390/register-s390.h b/deps/v8/src/codegen/s390/register-s390.h
index 009248a65c..0c6da03901 100644
--- a/deps/v8/src/codegen/s390/register-s390.h
+++ b/deps/v8/src/codegen/s390/register-s390.h
@@ -186,7 +186,11 @@ class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
// d14: 0.0
// d15: scratch register.
static constexpr int kSizeInBytes = 8;
- inline static int NumRegisters();
+
+ // This function differs from kNumRegisters by returning the number of double
+ // registers supported by the current CPU, while kNumRegisters always returns
+ // 32.
+ inline static int SupportedRegisterCount();
private:
friend class RegisterBase;
diff --git a/deps/v8/src/codegen/safepoint-table.cc b/deps/v8/src/codegen/safepoint-table.cc
index 396cc9007f..644931e0ea 100644
--- a/deps/v8/src/codegen/safepoint-table.cc
+++ b/deps/v8/src/codegen/safepoint-table.cc
@@ -120,7 +120,7 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
RemoveDuplicates();
// Make sure the safepoint table is properly aligned. Pad with nops.
- assembler->Align(kIntSize);
+ assembler->Align(Code::kMetadataAlignment);
assembler->RecordComment(";;; Safepoint table.");
offset_ = assembler->pc_offset();
diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h
index ba1f609bcf..72f9c6e3aa 100644
--- a/deps/v8/src/codegen/tnode.h
+++ b/deps/v8/src/codegen/tnode.h
@@ -238,28 +238,11 @@ struct UnionT {
using AnyTaggedT = UnionT<Object, MaybeObject>;
using Number = UnionT<Smi, HeapNumber>;
using Numeric = UnionT<Number, BigInt>;
+using ContextOrEmptyContext = UnionT<Context, Smi>;
// A pointer to a builtin function, used by Torque's function pointers.
using BuiltinPtr = Smi;
-class int31_t {
- public:
- int31_t() : value_(0) {}
- int31_t(int value) : value_(value) { // NOLINT(runtime/explicit)
- DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
- }
- int31_t& operator=(int value) {
- DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
- value_ = value;
- return *this;
- }
- int32_t value() const { return value_; }
- operator int32_t() const { return value_; }
-
- private:
- int32_t value_;
-};
-
template <class T, class U>
struct is_subtype {
static const bool value =
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index c1e2ec9808..5327745a02 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -246,6 +246,9 @@ bool ConstPool::AddSharedEntry(uint64_t data, int offset) {
bool ConstPool::TryRecordEntry(intptr_t data, RelocInfo::Mode mode) {
if (!FLAG_partial_constant_pool) return false;
+ DCHECK_WITH_MSG(
+ FLAG_text_is_readable,
+ "The partial constant pool requires a readable .text section");
if (!RelocInfo::IsShareableRelocMode(mode)) return false;
// Currently, partial constant pool only handles the following kinds of
@@ -332,6 +335,15 @@ Assembler::Assembler(const AssemblerOptions& options,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyways.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
PatchConstPool();
DCHECK(constpool_.IsEmpty());
@@ -1207,13 +1219,6 @@ void Assembler::decb(Operand dst) {
emit_operand(1, dst);
}
-void Assembler::enter(Immediate size) {
- EnsureSpace ensure_space(this);
- emit(0xC8);
- emitw(size.value_); // 16 bit operand, always.
- emit(0);
-}
-
void Assembler::hlt() {
EnsureSpace ensure_space(this);
emit(0xF4);
@@ -2757,8 +2762,16 @@ void Assembler::movdqu(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-void Assembler::pinsrw(XMMRegister dst, Register src, int8_t imm8) {
- DCHECK(is_uint8(imm8));
+void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::pinsrw(XMMRegister dst, Register src, uint8_t imm8) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -2768,8 +2781,7 @@ void Assembler::pinsrw(XMMRegister dst, Register src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pinsrw(XMMRegister dst, Operand src, int8_t imm8) {
- DCHECK(is_uint8(imm8));
+void Assembler::pinsrw(XMMRegister dst, Operand src, uint8_t imm8) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -2791,7 +2803,7 @@ void Assembler::pextrq(Register dst, XMMRegister src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pinsrq(XMMRegister dst, Register src, int8_t imm8) {
+void Assembler::pinsrq(XMMRegister dst, Register src, uint8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -2803,9 +2815,8 @@ void Assembler::pinsrq(XMMRegister dst, Register src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pinsrq(XMMRegister dst, Operand src, int8_t imm8) {
+void Assembler::pinsrq(XMMRegister dst, Operand src, uint8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
- DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
emit(0x66);
emit_rex_64(dst, src);
@@ -2816,22 +2827,20 @@ void Assembler::pinsrq(XMMRegister dst, Operand src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pinsrd(XMMRegister dst, Register src, int8_t imm8) {
+void Assembler::pinsrd(XMMRegister dst, Register src, uint8_t imm8) {
sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x22, imm8);
}
-void Assembler::pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
- DCHECK(is_uint8(imm8));
+void Assembler::pinsrd(XMMRegister dst, Operand src, uint8_t imm8) {
sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x22);
emit(imm8);
}
-void Assembler::pinsrb(XMMRegister dst, Register src, int8_t imm8) {
+void Assembler::pinsrb(XMMRegister dst, Register src, uint8_t imm8) {
sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x20, imm8);
}
-void Assembler::pinsrb(XMMRegister dst, Operand src, int8_t imm8) {
- DCHECK(is_uint8(imm8));
+void Assembler::pinsrb(XMMRegister dst, Operand src, uint8_t imm8) {
sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x20);
emit(imm8);
}
@@ -2990,6 +2999,42 @@ void Assembler::movss(Operand src, XMMRegister dst) {
emit_sse_operand(dst, src);
}
+void Assembler::movlps(XMMRegister dst, Operand src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x12);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::movlps(Operand src, XMMRegister dst) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x13);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::movhps(XMMRegister dst, Operand src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x16);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::movhps(Operand src, XMMRegister dst) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x17);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::cmpps(XMMRegister dst, XMMRegister src, int8_t cmp) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
@@ -3463,6 +3508,38 @@ void Assembler::vmovdqu(Operand dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
+void Assembler::vmovlps(XMMRegister dst, XMMRegister src1, Operand src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit(0x12);
+ emit_sse_operand(dst, src2);
+}
+
+void Assembler::vmovlps(Operand dst, XMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(src, xmm0, dst, kL128, kNone, k0F, kWIG);
+ emit(0x13);
+ emit_sse_operand(src, dst);
+}
+
+void Assembler::vmovhps(XMMRegister dst, XMMRegister src1, Operand src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit(0x16);
+ emit_sse_operand(dst, src2);
+}
+
+void Assembler::vmovhps(Operand dst, XMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(src, xmm0, dst, kL128, kNone, k0F, kWIG);
+ emit(0x17);
+ emit_sse_operand(src, dst);
+}
+
void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2, SIMDPrefix pp, LeadingOpcode m,
VexW w) {
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index ac0c66ae5d..e05eaa9592 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -561,7 +561,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void popq(Register dst);
void popq(Operand dst);
- void enter(Immediate size);
void leave();
// Moves
@@ -929,6 +928,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movss(XMMRegister dst, Operand src);
void movss(Operand dst, XMMRegister src);
+
+ void movlps(XMMRegister dst, Operand src);
+ void movlps(Operand dst, XMMRegister src);
+
+ void movhps(XMMRegister dst, Operand src);
+ void movhps(Operand dst, XMMRegister src);
+
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
void cvttss2si(Register dst, Operand src);
@@ -1060,16 +1066,18 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE4_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
SSE4_UNOP_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
+ DECLARE_SSE4_INSTRUCTION(pblendvb, 66, 0F, 38, 10)
+ DECLARE_SSE4_INSTRUCTION(blendvps, 66, 0F, 38, 14)
DECLARE_SSE4_INSTRUCTION(blendvpd, 66, 0F, 38, 15)
#undef DECLARE_SSE4_INSTRUCTION
#define DECLARE_SSE4_EXTRACT_INSTRUCTION(instruction, prefix, escape1, \
escape2, opcode) \
- void instruction(Register dst, XMMRegister src, int8_t imm8) { \
+ void instruction(Register dst, XMMRegister src, uint8_t imm8) { \
sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode, \
imm8); \
} \
- void instruction(Operand dst, XMMRegister src, int8_t imm8) { \
+ void instruction(Operand dst, XMMRegister src, uint8_t imm8) { \
sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode, \
imm8); \
}
@@ -1120,6 +1128,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSSE3_UNOP_AVX_INSTRUCTION)
#undef DECLARE_SSSE3_UNOP_AVX_INSTRUCTION
+ void vpblendvb(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister mask) {
+ vinstr(0x4C, dst, src1, src2, k66, k0F3A, kW0);
+ // The mask operand is encoded in bits[7:4] of the immediate byte.
+ emit(mask.code() << 4);
+ }
+
+ void vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister mask) {
+ vinstr(0x4A, dst, src1, src2, k66, k0F3A, kW0);
+ // The mask operand is encoded in bits[7:4] of the immediate byte.
+ emit(mask.code() << 4);
+ }
+
void vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister mask) {
vinstr(0x4B, dst, src1, src2, k66, k0F3A, kW0);
@@ -1138,6 +1160,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE4_UNOP_INSTRUCTION_LIST(DECLARE_SSE4_PMOV_AVX_INSTRUCTION)
#undef DECLARE_SSE4_PMOV_AVX_INSTRUCTION
+#define DECLARE_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, opcode) \
+ void v##instruction(Register dst, XMMRegister src, uint8_t imm8) { \
+ XMMRegister idst = XMMRegister::from_code(dst.code()); \
+ vinstr(0x##opcode, src, xmm0, idst, k##prefix, k##escape1##escape2, kW0); \
+ emit(imm8); \
+ } \
+ void v##instruction(Operand dst, XMMRegister src, uint8_t imm8) { \
+ vinstr(0x##opcode, src, xmm0, dst, k##prefix, k##escape1##escape2, kW0); \
+ emit(imm8); \
+ }
+
+ SSE4_EXTRACT_INSTRUCTION_LIST(DECLARE_AVX_INSTRUCTION)
+#undef DECLARE_AVX_INSTRUCTION
+
void movd(XMMRegister dst, Register src);
void movd(XMMRegister dst, Operand src);
void movd(Register dst, XMMRegister src);
@@ -1160,6 +1196,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movdqu(Operand dst, XMMRegister src);
void movdqu(XMMRegister dst, Operand src);
+ void movdqu(XMMRegister dst, XMMRegister src);
void movapd(XMMRegister dst, XMMRegister src);
void movupd(XMMRegister dst, Operand src);
@@ -1204,14 +1241,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void insertps(XMMRegister dst, XMMRegister src, byte imm8);
void insertps(XMMRegister dst, Operand src, byte imm8);
void pextrq(Register dst, XMMRegister src, int8_t imm8);
- void pinsrb(XMMRegister dst, Register src, int8_t imm8);
- void pinsrb(XMMRegister dst, Operand src, int8_t imm8);
- void pinsrw(XMMRegister dst, Register src, int8_t imm8);
- void pinsrw(XMMRegister dst, Operand src, int8_t imm8);
- void pinsrd(XMMRegister dst, Register src, int8_t imm8);
- void pinsrd(XMMRegister dst, Operand src, int8_t imm8);
- void pinsrq(XMMRegister dst, Register src, int8_t imm8);
- void pinsrq(XMMRegister dst, Operand src, int8_t imm8);
+ void pinsrb(XMMRegister dst, Register src, uint8_t imm8);
+ void pinsrb(XMMRegister dst, Operand src, uint8_t imm8);
+ void pinsrw(XMMRegister dst, Register src, uint8_t imm8);
+ void pinsrw(XMMRegister dst, Operand src, uint8_t imm8);
+ void pinsrd(XMMRegister dst, Register src, uint8_t imm8);
+ void pinsrd(XMMRegister dst, Operand src, uint8_t imm8);
+ void pinsrq(XMMRegister dst, Register src, uint8_t imm8);
+ void pinsrq(XMMRegister dst, Operand src, uint8_t imm8);
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
@@ -1290,6 +1327,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmovdqu(XMMRegister dst, Operand src);
void vmovdqu(Operand dst, XMMRegister src);
+ void vmovlps(XMMRegister dst, XMMRegister src1, Operand src2);
+ void vmovlps(Operand dst, XMMRegister src);
+
+ void vmovhps(XMMRegister dst, XMMRegister src1, Operand src2);
+ void vmovhps(Operand dst, XMMRegister src);
+
#define AVX_SSE_UNOP(instr, escape, opcode) \
void v##instr(XMMRegister dst, XMMRegister src2) { \
vps(0x##opcode, dst, xmm0, src2); \
@@ -1532,38 +1575,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x21, dst, src1, src2, k66, k0F3A, kWIG);
emit(imm8);
}
- void vextractps(Register dst, XMMRegister src, int8_t imm8) {
- XMMRegister idst = XMMRegister::from_code(dst.code());
- vinstr(0x17, src, xmm0, idst, k66, k0F3A, kWIG);
- emit(imm8);
- }
- void vpextrb(Register dst, XMMRegister src, uint8_t imm8) {
- XMMRegister idst = XMMRegister::from_code(dst.code());
- vinstr(0x14, src, xmm0, idst, k66, k0F3A, kW0);
- emit(imm8);
- }
- void vpextrb(Operand dst, XMMRegister src, uint8_t imm8) {
- vinstr(0x14, src, xmm0, dst, k66, k0F3A, kW0);
- emit(imm8);
- }
- void vpextrw(Register dst, XMMRegister src, uint8_t imm8) {
- XMMRegister idst = XMMRegister::from_code(dst.code());
- vinstr(0xc5, idst, xmm0, src, k66, k0F, kW0);
- emit(imm8);
- }
- void vpextrw(Operand dst, XMMRegister src, uint8_t imm8) {
- vinstr(0x15, src, xmm0, dst, k66, k0F3A, kW0);
- emit(imm8);
- }
- void vpextrd(Register dst, XMMRegister src, uint8_t imm8) {
- XMMRegister idst = XMMRegister::from_code(dst.code());
- vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW0);
- emit(imm8);
- }
- void vpextrd(Operand dst, XMMRegister src, uint8_t imm8) {
- vinstr(0x16, src, xmm0, dst, k66, k0F3A, kW0);
- emit(imm8);
- }
void vpextrq(Register dst, XMMRegister src, int8_t imm8) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW1);
@@ -1596,12 +1607,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x22, dst, src1, src2, k66, k0F3A, kW0);
emit(imm8);
}
- void vpinsrq(XMMRegister dst, XMMRegister src1, Register src2, int8_t imm8) {
+ void vpinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
XMMRegister isrc = XMMRegister::from_code(src2.code());
vinstr(0x22, dst, src1, isrc, k66, k0F3A, kW1);
emit(imm8);
}
- void vpinsrq(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8) {
+ void vpinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
vinstr(0x22, dst, src1, src2, k66, k0F3A, kW1);
emit(imm8);
}
diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64.cc b/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
index 5a9c386eb8..e4d6b92708 100644
--- a/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
@@ -129,16 +129,6 @@ void CallWithSpreadDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments (on the stack, not including receiver)
- // rdi : the target to call
- // rbx : the object to spread
- // rdx : the feedback slot
- Register registers[] = {rdi, rax, rbx, rdx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rdi : the target to call
@@ -147,16 +137,6 @@ void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rdi : the target to call
- // rbx : the arguments list
- // rdx : the feedback slot
- // rax : the feedback vector
- Register registers[] = {rdi, rbx, rdx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void ConstructVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments (on the stack, not including receiver)
@@ -188,16 +168,6 @@ void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments (on the stack, not including receiver)
- // rdi : the target to call
- // rdx : the new target
- // rbx : the feedback slot
- Register registers[] = {rdi, rdx, rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rdi : the target to call
@@ -207,16 +177,6 @@ void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rdi : the target to call
- // rdx : the new target
- // rbx : the arguments list
- // rax : the feedback slot
- Register registers[] = {rdi, rdx, rbx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
@@ -320,41 +280,6 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, // kLeft
- rax, // kRight
- rdi, // kSlot
- rbx}; // kMaybeFeedbackVector
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdi, // kFunction
- rax, // kActualArgumentsCount
- rcx, // kSlot
- rbx}; // kMaybeFeedbackVector
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, // kLeft
- rax, // kRight
- rdi, // kSlot
- rbx}; // kMaybeFeedbackVector
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, // kValue
- rax, // kSlot
- rdi}; // kMaybeFeedbackVector
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 7f7ff5038a..9f5917c23a 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -37,16 +37,9 @@ namespace internal {
Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
DCHECK_GE(index, 0);
-#ifdef V8_REVERSE_JSARGS
// arg[0] = rsp + kPCOnStackSize;
// arg[i] = arg[0] + i * kSystemPointerSize;
return Operand(rsp, kPCOnStackSize + index * kSystemPointerSize);
-#else
- // arg[0] = (rsp + kPCOnStackSize) + argc * kSystemPointerSize;
- // arg[i] = arg[0] - i * kSystemPointerSize;
- return Operand(rsp, argc_, times_system_pointer_size,
- kPCOnStackSize - index * kSystemPointerSize);
-#endif
}
void MacroAssembler::Load(Register destination, ExternalReference source) {
@@ -343,11 +336,22 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
void TurboAssembler::LoadExternalPointerField(Register destination,
- Operand field_operand) {
- movq(destination, field_operand);
- if (V8_HEAP_SANDBOX_BOOL) {
- xorq(destination, Immediate(kExternalPointerSalt));
+ Operand field_operand,
+ ExternalPointerTag tag) {
+#ifdef V8_HEAP_SANDBOX
+ LoadAddress(kScratchRegister,
+ ExternalReference::external_pointer_table_address(isolate()));
+ movq(kScratchRegister,
+ Operand(kScratchRegister, Internals::kExternalPointerTableBufferOffset));
+ movl(destination, field_operand);
+ movq(destination, Operand(kScratchRegister, destination, times_8, 0));
+ if (tag != 0) {
+ movq(kScratchRegister, Immediate64(tag));
+ xorq(destination, kScratchRegister);
}
+#else
+ movq(destination, field_operand);
+#endif // V8_HEAP_SANDBOX
}
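// Annotation (sketch of the sandbox path above): under V8_HEAP_SANDBOX the
// field holds an index rather than a raw pointer, so the load is conceptually
//   destination = external_pointer_table[field_value] ^ tag;
// while the non-sandbox build keeps the plain 64-bit load.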
void TurboAssembler::RestoreRegisters(RegList registers) {
@@ -1062,6 +1066,14 @@ void TurboAssembler::Move(Register dst, ExternalReference ext) {
movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
}
+void MacroAssembler::Cmp(Register dst, int32_t src) {
+ if (src == 0) {
+ testl(dst, dst);
+ } else {
+ cmpl(dst, Immediate(src));
+ }
+}
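// Annotation: comparing against zero via testl(dst, dst) uses a shorter
// encoding than cmpl with an immediate and sets the flags a zero-compare
// needs, so Cmp special-cases src == 0.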
+
void MacroAssembler::SmiTag(Register reg) {
STATIC_ASSERT(kSmiTag == 0);
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
@@ -1356,7 +1368,7 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
void TurboAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) {
Move(dst, low);
movq(kScratchRegister, high);
- Pinsrq(dst, kScratchRegister, int8_t{1});
+ Pinsrq(dst, kScratchRegister, uint8_t{1});
}
// ----------------------------------------------------------------------------
@@ -1526,8 +1538,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index)) {
Label skip;
if (cc != always) {
if (cc == never) return;
@@ -1576,8 +1587,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index)) {
// Inline the trampoline.
CallBuiltin(builtin_index);
return;
@@ -1587,6 +1597,13 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
call(code_object, rmode);
}
+Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(
+ Builtins::Name builtin_index) {
+ DCHECK(root_array_available());
+ return Operand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(builtin_index));
+}
+
Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
if (SmiValuesAre32Bits()) {
// The builtin_index register contains the builtin index as a Smi.
@@ -1710,7 +1727,19 @@ void TurboAssembler::RetpolineJump(Register reg) {
ret(0);
}
-void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
+void TurboAssembler::Shufps(XMMRegister dst, XMMRegister src, byte imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vshufps(dst, src, src, imm8);
+ } else {
+ if (dst != src) {
+ movss(dst, src);
+ }
+ shufps(dst, src, static_cast<byte>(0));
+ }
+}
+
+void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
return;
@@ -1729,43 +1758,71 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
shrq(dst, Immediate(32));
}
-void TurboAssembler::Pextrw(Register dst, XMMRegister src, int8_t imm8) {
+namespace {
+
+template <typename Src>
+using AvxFn = void (Assembler::*)(XMMRegister, XMMRegister, Src, uint8_t);
+template <typename Src>
+using NoAvxFn = void (Assembler::*)(XMMRegister, Src, uint8_t);
+
+template <typename Src>
+void PinsrHelper(Assembler* assm, AvxFn<Src> avx, NoAvxFn<Src> noavx,
+ XMMRegister dst, XMMRegister src1, Src src2, uint8_t imm8,
+ base::Optional<CpuFeature> feature = base::nullopt) {
if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpextrw(dst, src, imm8);
- return;
- } else {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- CpuFeatureScope sse_scope(this, SSE4_1);
- pextrw(dst, src, imm8);
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx)(dst, src1, src2, imm8);
return;
}
-}
-void TurboAssembler::Pextrb(Register dst, XMMRegister src, int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpextrb(dst, src, imm8);
- return;
+ if (dst != src1) {
+ assm->movdqu(dst, src1);
+ }
+ if (feature.has_value()) {
+ DCHECK(CpuFeatures::IsSupported(*feature));
+ CpuFeatureScope scope(assm, *feature);
+ (assm->*noavx)(dst, src2, imm8);
} else {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- CpuFeatureScope sse_scope(this, SSE4_1);
- pextrb(dst, src, imm8);
- return;
+ (assm->*noavx)(dst, src2, imm8);
}
}
+} // namespace
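// Annotation (hedged): PinsrHelper centralizes the AVX/SSE dispatch that each
// Pinsr* wrapper previously repeated. With AVX it emits the three-operand form
// (dst, src1, src2, imm8); otherwise it copies src1 into dst first and emits
// the two-operand SSE form, optionally under a CpuFeatureScope, e.g.
//   PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw,
//               dst, src1, src2, imm8);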
-void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrd(dst, dst, src, imm8);
- return;
- } else if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrd(dst, src, imm8);
+void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Register src2,
+ uint8_t imm8) {
+ PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
+ imm8, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2,
+ uint8_t imm8) {
+ PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
+ imm8, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Register src2,
+ uint8_t imm8) {
+ PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
+ imm8);
+}
+
+void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
+ uint8_t imm8) {
+ PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
+ imm8);
+}
+
+void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Register src2,
+ uint8_t imm8) {
+ // Pinsrd needs a fallback for when SSE4_1 is unavailable; Pinsrb and Pinsrq
+ // do not, because they are only used by Wasm SIMD, which requires SSE4_1.
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ PinsrHelper(this, &Assembler::vpinsrd, &Assembler::pinsrd, dst, src1, src2,
+ imm8, base::Optional<CpuFeature>(SSE4_1));
return;
}
- Movd(kScratchDoubleReg, src);
+
+ Movd(kScratchDoubleReg, src2);
if (imm8 == 1) {
punpckldq(dst, kScratchDoubleReg);
} else {
@@ -1774,17 +1831,17 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
}
}
-void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrd(dst, dst, src, imm8);
- return;
- } else if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrd(dst, src, imm8);
+void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
+ uint8_t imm8) {
+ // Pinsrd needs a fallback for when SSE4_1 is unavailable; Pinsrb and Pinsrq
+ // do not, because they are only used by Wasm SIMD, which requires SSE4_1.
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ PinsrHelper(this, &Assembler::vpinsrd, &Assembler::pinsrd, dst, src1, src2,
+ imm8, base::Optional<CpuFeature>(SSE4_1));
return;
}
- Movd(kScratchDoubleReg, src);
+
+ Movd(kScratchDoubleReg, src2);
if (imm8 == 1) {
punpckldq(dst, kScratchDoubleReg);
} else {
@@ -1793,54 +1850,24 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
}
}
-void TurboAssembler::Pinsrw(XMMRegister dst, Register src, int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrw(dst, dst, src, imm8);
- return;
- } else {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrw(dst, src, imm8);
- return;
- }
+void TurboAssembler::Pinsrd(XMMRegister dst, Register src2, uint8_t imm8) {
+ Pinsrd(dst, dst, src2, imm8);
}
-void TurboAssembler::Pinsrw(XMMRegister dst, Operand src, int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrw(dst, dst, src, imm8);
- return;
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrw(dst, src, imm8);
- return;
- }
+void TurboAssembler::Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8) {
+ Pinsrd(dst, dst, src2, imm8);
}
-void TurboAssembler::Pinsrb(XMMRegister dst, Register src, int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrb(dst, dst, src, imm8);
- return;
- } else {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrb(dst, src, imm8);
- return;
- }
+void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2,
+ uint8_t imm8) {
+ PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
+ imm8, base::Optional<CpuFeature>(SSE4_1));
}
-void TurboAssembler::Pinsrb(XMMRegister dst, Operand src, int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrb(dst, dst, src, imm8);
- return;
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrb(dst, src, imm8);
- return;
- }
+void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2,
+ uint8_t imm8) {
+ PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
+ imm8, base::Optional<CpuFeature>(SSE4_1));
}
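The new three-operand Pinsr* entry points are non-destructive: dst may differ from src1, and the helper copies src1 into dst first on the SSE path. A standalone model of the pinsrq lane-insert semantics, in plain C++ rather than V8 code:

#include <array>
#include <cstdint>

using I64x2 = std::array<uint64_t, 2>;

// Models vpinsrq dst, src1, src2, imm8: copy src1, then replace lane imm8.
I64x2 PinsrqModel(const I64x2& src1, uint64_t src2, uint8_t imm8) {
  I64x2 dst = src1;      // non-destructive: dst need not alias src1
  dst[imm8 & 1] = src2;  // overwrite the selected 64-bit lane
  return dst;
}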
void TurboAssembler::Psllq(XMMRegister dst, byte imm8) {
@@ -1873,6 +1900,58 @@ void TurboAssembler::Pslld(XMMRegister dst, byte imm8) {
}
}
+void TurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister mask) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpblendvb(dst, src1, src2, mask);
+ } else {
+ DCHECK_EQ(dst, src1);
+ DCHECK_EQ(xmm0, mask);
+ pblendvb(dst, src2);
+ }
+}
+
+void TurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister mask) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vblendvps(dst, src1, src2, mask);
+ } else {
+ DCHECK_EQ(dst, src1);
+ DCHECK_EQ(xmm0, mask);
+ blendvps(dst, src2);
+ }
+}
+
+void TurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister mask) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vblendvpd(dst, src1, src2, mask);
+ } else {
+ DCHECK_EQ(dst, src1);
+ DCHECK_EQ(xmm0, mask);
+ blendvpd(dst, src2);
+ }
+}
+
+void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src,
+ XMMRegister mask) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpshufb(dst, src, mask);
+ } else {
+ // Make sure these are different so that we won't overwrite mask.
+ DCHECK_NE(dst, mask);
+ if (dst != src) {
+ movapd(dst, src);
+ }
+ CpuFeatureScope sse_scope(this, SSSE3);
+ pshufb(dst, mask);
+ }
+}
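On the non-AVX path the blend helpers only assert the SSE register constraints (dst must alias src1 and the mask must already live in xmm0); the operation itself picks each byte by the sign bit of the corresponding mask byte. A standalone model of that selection, not V8 code:

#include <array>
#include <cstddef>
#include <cstdint>

using Bytes16 = std::array<uint8_t, 16>;

// Models pblendvb: take the byte from src2 where the mask byte's sign bit is
// set, otherwise keep the byte from src1.
Bytes16 PblendvbModel(const Bytes16& src1, const Bytes16& src2,
                      const Bytes16& mask) {
  Bytes16 dst;
  for (size_t i = 0; i < dst.size(); ++i) {
    dst[i] = (mask[i] & 0x80) ? src2[i] : src1[i];
  }
  return dst;
}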
+
void TurboAssembler::Psrld(XMMRegister dst, byte imm8) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -2315,8 +2394,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register actual_parameter_count,
InvokeFlag flag) {
// You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
- DCHECK(function == rdi);
+ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_EQ(function, rdi);
DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
// On function call, call into the debugger if necessary.
@@ -2327,7 +2406,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Operand debug_hook_active_operand =
ExternalReferenceAsOperand(debug_hook_active);
cmpb(debug_hook_active_operand, Immediate(0));
- j(not_equal, &debug_hook, Label::kNear);
+ j(not_equal, &debug_hook);
}
bind(&continue_after_hook);
@@ -2355,24 +2434,67 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
bind(&debug_hook);
CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
actual_parameter_count);
- jmp(&continue_after_hook, Label::kNear);
+ jmp(&continue_after_hook);
bind(&done);
}
+Operand MacroAssembler::StackLimitAsOperand(StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ CHECK(is_int32(offset));
+ return Operand(kRootRegister, static_cast<int32_t>(offset));
+}
+
+void MacroAssembler::StackOverflowCheck(
+ Register num_args, Register scratch, Label* stack_overflow,
+ Label::Distance stack_overflow_distance) {
+ DCHECK_NE(num_args, scratch);
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ movq(kScratchRegister, StackLimitAsOperand(StackLimitKind::kRealStackLimit));
+ movq(scratch, rsp);
+  // Make scratch the space we have left. The stack might already have
+  // overflowed here, which will cause scratch to become negative.
+ subq(scratch, kScratchRegister);
+  // TODO(victorgomes): Use the ia32 approach with leaq, since it requires
+  // fewer instructions.
+ sarq(scratch, Immediate(kSystemPointerSizeLog2));
+ // Check if the arguments will overflow the stack.
+ cmpq(scratch, num_args);
+ // Signed comparison.
+ // TODO(victorgomes): Save some bytes in the builtins that use stack checks
+ // by jumping to a builtin that throws the exception.
+ j(less_equal, stack_overflow, stack_overflow_distance);
+}
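StackOverflowCheck computes, in pointer-sized slots, how much room remains above the real stack limit and compares it against the argument count with a signed comparison. A standalone model of the same arithmetic, in plain C++ rather than assembler:

#include <cstdint>

// Returns true when pushing num_args more slots would cross the real limit.
// The arithmetic shift keeps the sign, so an already-overflowed stack stays
// negative and still compares as overflow.
bool WouldOverflow(intptr_t rsp, intptr_t real_limit, intptr_t num_args,
                   int pointer_size_log2 = 3) {
  intptr_t remaining = (rsp - real_limit) >> pointer_size_log2;  // bytes -> slots
  return remaining <= num_args;  // signed, mirrors j(less_equal, ...)
}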
+
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeFlag flag) {
if (expected_parameter_count != actual_parameter_count) {
Label regular_invoke;
#ifdef V8_NO_ARGUMENTS_ADAPTOR
- // Skip if adaptor sentinel.
+    // If the expected parameter count is equal to the adaptor sentinel, there
+    // is no need to push undefined values as arguments.
cmpl(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
- j(equal, &regular_invoke, Label::kNear);
+ j(equal, &regular_invoke, Label::kFar);
- // Skip if overapplication or if expected number of arguments.
+    // In case of overapplication, or if the actual argument count equals the
+    // formal parameter count, there is no need to push extra undefined values.
subq(expected_parameter_count, actual_parameter_count);
- j(less_equal, &regular_invoke, Label::kNear);
+ j(less_equal, &regular_invoke, Label::kFar);
+
+ Label stack_overflow;
+ StackOverflowCheck(expected_parameter_count, rcx, &stack_overflow);
// Underapplication. Move the arguments already in the stack, including the
// receiver and the return address.
@@ -2409,6 +2531,15 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
kScratchRegister);
j(greater, &loop, Label::kNear);
}
+ jmp(&regular_invoke);
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ int3(); // This should be unreachable.
+ }
#else
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
@@ -2449,13 +2580,7 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Push(fun);
Push(fun);
// Arguments are located 2 words below the base pointer.
-#ifdef V8_REVERSE_JSARGS
Operand receiver_op = Operand(rbp, kSystemPointerSize * 2);
-#else
- Operand receiver_op =
- Operand(rbp, actual_parameter_count, times_system_pointer_size,
- kSystemPointerSize * 2);
-#endif
Push(receiver_op);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
@@ -2831,13 +2956,18 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
Set(kSpeculationPoisonRegister, -1);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label*) {
+ // Note: Assembler::call is used here on purpose to guarantee fixed-size
+ // exits even on Atom CPUs; see TurboAssembler::Call for Atom-specific
+ // performance tuning which emits a different instruction sequence.
+ call(EntryFromBuiltinIndexAsOperand(target));
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
- NoRootArrayScope no_root_array(this);
- // Save the deopt id in r13 (we don't need the roots array from now on).
- movq(r13, Immediate(deopt_id));
- call(target, RelocInfo::RUNTIME_ENTRY);
}
void TurboAssembler::Trap() { int3(); }
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index 995f2565cc..9fc4d94768 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -33,6 +33,10 @@ struct SmiIndex {
ScaleFactor scale;
};
+// TODO(victorgomes): Move definition to macro-assembler.h, once all other
+// platforms are updated.
+enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
// Convenient class to access arguments below the stack pointer.
class StackArgumentsAccessor {
public:
@@ -145,6 +149,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Movss, movss)
AVX_OP(Movsd, movsd)
AVX_OP(Movdqu, movdqu)
+ AVX_OP(Movlps, movlps)
+ AVX_OP(Movhps, movhps)
AVX_OP(Pcmpeqb, pcmpeqb)
AVX_OP(Pcmpeqw, pcmpeqw)
AVX_OP(Pcmpeqd, pcmpeqd)
@@ -222,7 +228,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Divpd, divpd)
AVX_OP(Maxps, maxps)
AVX_OP(Maxpd, maxpd)
- AVX_OP(Shufps, shufps)
AVX_OP(Cvtdq2ps, cvtdq2ps)
AVX_OP(Rcpps, rcpps)
AVX_OP(Rsqrtps, rsqrtps)
@@ -281,6 +286,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP_SSE4_1(Pmovzxbw, pmovzxbw)
AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
AVX_OP_SSE4_1(Pmovzxdq, pmovzxdq)
+ AVX_OP_SSE4_1(Pextrb, pextrb)
+ AVX_OP_SSE4_1(Pextrw, pextrw)
AVX_OP_SSE4_1(Pextrq, pextrq)
AVX_OP_SSE4_1(Roundps, roundps)
AVX_OP_SSE4_1(Roundpd, roundpd)
@@ -488,6 +495,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }
+ Operand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
Operand EntryFromBuiltinIndexAsOperand(Register builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
@@ -507,22 +515,29 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void RetpolineJump(Register reg);
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
void Trap() override;
void DebugBreak() override;
+  // Shufps that will move src into dst if AVX is not supported.
+ void Shufps(XMMRegister dst, XMMRegister src, byte imm8);
+
// Non-SSE2 instructions.
- void Pextrd(Register dst, XMMRegister src, int8_t imm8);
- void Pextrw(Register dst, XMMRegister src, int8_t imm8);
- void Pextrb(Register dst, XMMRegister src, int8_t imm8);
- void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
- void Pinsrd(XMMRegister dst, Operand src, int8_t imm8);
- void Pinsrw(XMMRegister dst, Register src, int8_t imm8);
- void Pinsrw(XMMRegister dst, Operand src, int8_t imm8);
- void Pinsrb(XMMRegister dst, Register src, int8_t imm8);
- void Pinsrb(XMMRegister dst, Operand src, int8_t imm8);
+ void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
+
+ void Pinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
+ void Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
+ void Pinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
+ void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
+ void Pinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
+ void Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
+ void Pinsrd(XMMRegister dst, Register src2, uint8_t imm8);
+ void Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8);
+ void Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
+ void Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
void Psllq(XMMRegister dst, int imm8) { Psllq(dst, static_cast<byte>(imm8)); }
void Psllq(XMMRegister dst, byte imm8);
@@ -531,6 +546,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Pslld(XMMRegister dst, byte imm8);
void Psrld(XMMRegister dst, byte imm8);
+ void Pblendvb(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister mask);
+ void Blendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister mask);
+ void Blendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister mask);
+
+ // Supports both SSE and AVX. Move src1 to dst if they are not equal on SSE.
+ void Pshufb(XMMRegister dst, XMMRegister src1, XMMRegister src2);
+
void CompareRoot(Register with, RootIndex index);
void CompareRoot(Operand with, RootIndex index);
@@ -686,7 +711,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Loads a field containing off-heap pointer and does necessary decoding
// if V8 heap sandbox is enabled.
- void LoadExternalPointerField(Register destination, Operand field_operand);
+ void LoadExternalPointerField(Register destination, Operand field_operand,
+ ExternalPointerTag tag);
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -879,6 +905,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void Cmp(Operand dst, Handle<Object> source);
void Cmp(Register dst, Smi src);
void Cmp(Operand dst, Smi src);
+ void Cmp(Register dst, int32_t src);
// Checks if value is in range [lower_limit, higher_limit] using a single
// comparison.
@@ -1008,6 +1035,13 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void DecrementCounter(StatsCounter* counter, int value);
// ---------------------------------------------------------------------------
+ // Stack limit utilities
+ Operand StackLimitAsOperand(StackLimitKind kind);
+ void StackOverflowCheck(
+ Register num_args, Register scratch, Label* stack_overflow,
+ Label::Distance stack_overflow_distance = Label::kFar);
+
+ // ---------------------------------------------------------------------------
// In-place weak references.
void LoadWeakValue(Register in_out, Label* target_if_cleared);
@@ -1044,7 +1078,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class StandardFrame;
+ friend class CommonFrame;
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/common/DIR_METADATA b/deps/v8/src/common/DIR_METADATA
new file mode 100644
index 0000000000..2f8dbbcf45
--- /dev/null
+++ b/deps/v8/src/common/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/common/OWNERS b/deps/v8/src/common/OWNERS
index 4750620072..48d72aea5e 100644
--- a/deps/v8/src/common/OWNERS
+++ b/deps/v8/src/common/OWNERS
@@ -1,3 +1 @@
file:../../COMMON_OWNERS
-
-# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/common/assert-scope.cc b/deps/v8/src/common/assert-scope.cc
index 531ac4e024..520826349d 100644
--- a/deps/v8/src/common/assert-scope.cc
+++ b/deps/v8/src/common/assert-scope.cc
@@ -130,6 +130,8 @@ template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>;
template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>;
template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>;
template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>;
+template class PerThreadAssertScope<CODE_ALLOCATION_ASSERT, false>;
+template class PerThreadAssertScope<CODE_ALLOCATION_ASSERT, true>;
template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>;
template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>;
diff --git a/deps/v8/src/common/assert-scope.h b/deps/v8/src/common/assert-scope.h
index b958ca4bed..8937197d26 100644
--- a/deps/v8/src/common/assert-scope.h
+++ b/deps/v8/src/common/assert-scope.h
@@ -33,6 +33,7 @@ enum PerThreadAssertType {
HANDLE_ALLOCATION_ASSERT,
HANDLE_DEREFERENCE_ASSERT,
CODE_DEPENDENCY_CHANGE_ASSERT,
+ CODE_ALLOCATION_ASSERT,
LAST_PER_THREAD_ASSERT_TYPE
};
@@ -128,9 +129,17 @@ using AllowHandleAllocation =
PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, true>;
// Scope to document where we do not expect garbage collections. It differs from
-// DisallowHeapAllocation by also forbiding safepoints.
+// DisallowHeapAllocation by also forbidding safepoints.
using DisallowGarbageCollection =
PerThreadAssertScopeDebugOnly<GARBAGE_COLLECTION_ASSERT, false>;
+// The DISALLOW_GARBAGE_COLLECTION macro can be used to define a
+// DisallowGarbageCollection field in a class; the field is compiled away in
+// release builds.
+#ifdef DEBUG
+#define DISALLOW_GARBAGE_COLLECTION(name) DisallowGarbageCollection name;
+#else
+#define DISALLOW_GARBAGE_COLLECTION(name)
+#endif
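A hedged sketch of the intended use, mirroring the existing DISALLOW_HEAP_ALLOCATION pattern below; the class and field names here are made up for illustration:

class ExampleVisitor {  // hypothetical class
 public:
  void VisitWithoutTriggeringGC();

 private:
  // Debug-only DisallowGarbageCollection member; expands to nothing in release.
  DISALLOW_GARBAGE_COLLECTION(no_gc_)
};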
// Scope to introduce an exception to DisallowGarbageCollection.
using AllowGarbageCollection =
@@ -140,6 +149,9 @@ using AllowGarbageCollection =
// and will eventually be removed, use DisallowGarbageCollection instead.
using DisallowHeapAllocation =
PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, false>;
+// The DISALLOW_HEAP_ALLOCATION macro can be used to define a
+// DisallowHeapAllocation field in a class; the field is compiled away in
+// release builds.
#ifdef DEBUG
#define DISALLOW_HEAP_ALLOCATION(name) DisallowHeapAllocation name;
#else
@@ -166,6 +178,14 @@ using DisallowCodeDependencyChange =
using AllowCodeDependencyChange =
PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, true>;
+// Scope to document where we do not expect code to be allocated.
+using DisallowCodeAllocation =
+ PerThreadAssertScopeDebugOnly<CODE_ALLOCATION_ASSERT, false>;
+
+// Scope to introduce an exception to DisallowCodeAllocation.
+using AllowCodeAllocation =
+ PerThreadAssertScopeDebugOnly<CODE_ALLOCATION_ASSERT, true>;
+
class DisallowHeapAccess {
DisallowCodeDependencyChange no_dependency_change_;
DisallowHandleAllocation no_handle_allocation_;
@@ -273,6 +293,8 @@ extern template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>;
extern template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT,
false>;
extern template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>;
+extern template class PerThreadAssertScope<CODE_ALLOCATION_ASSERT, false>;
+extern template class PerThreadAssertScope<CODE_ALLOCATION_ASSERT, true>;
extern template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>;
extern template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>;
diff --git a/deps/v8/src/common/external-pointer-inl.h b/deps/v8/src/common/external-pointer-inl.h
index 32a78002e1..070d787b63 100644
--- a/deps/v8/src/common/external-pointer-inl.h
+++ b/deps/v8/src/common/external-pointer-inl.h
@@ -12,18 +12,93 @@
namespace v8 {
namespace internal {
-V8_INLINE ExternalPointer_t EncodeExternalPointer(Isolate* isolate,
- Address external_pointer) {
+V8_INLINE Address DecodeExternalPointer(IsolateRoot isolate_root,
+ ExternalPointer_t encoded_pointer,
+ ExternalPointerTag tag) {
STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
- if (!V8_HEAP_SANDBOX_BOOL) return external_pointer;
- return external_pointer ^ kExternalPointerSalt;
+#ifdef V8_HEAP_SANDBOX
+ uint32_t index = static_cast<uint32_t>(encoded_pointer);
+ const Isolate* isolate = Isolate::FromRootAddress(isolate_root.address());
+ return isolate->external_pointer_table().get(index) ^ tag;
+#else
+ return encoded_pointer;
+#endif
}
-V8_INLINE Address DecodeExternalPointer(const Isolate* isolate,
- ExternalPointer_t encoded_pointer) {
- STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
- if (!V8_HEAP_SANDBOX_BOOL) return encoded_pointer;
- return encoded_pointer ^ kExternalPointerSalt;
+V8_INLINE void InitExternalPointerField(Address field_address,
+ Isolate* isolate) {
+#ifdef V8_HEAP_SANDBOX
+ static_assert(kExternalPointerSize == kSystemPointerSize,
+ "Review the code below, once kExternalPointerSize is 4-byte "
+ "the address of the field will always be aligned");
+ ExternalPointer_t index = isolate->external_pointer_table().allocate();
+ base::WriteUnalignedValue<ExternalPointer_t>(field_address, index);
+#else
+ // Nothing to do.
+#endif // V8_HEAP_SANDBOX
+}
+
+V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
+ Address value, ExternalPointerTag tag) {
+#ifdef V8_HEAP_SANDBOX
+ ExternalPointer_t index = isolate->external_pointer_table().allocate();
+ isolate->external_pointer_table().set(static_cast<uint32_t>(index),
+ value ^ tag);
+ static_assert(kExternalPointerSize == kSystemPointerSize,
+ "Review the code below, once kExternalPointerSize is 4-byte "
+ "the address of the field will always be aligned");
+ base::WriteUnalignedValue<ExternalPointer_t>(field_address, index);
+#else
+ // Pointer compression causes types larger than kTaggedSize to be unaligned.
+ constexpr bool v8_pointer_compression_unaligned =
+ kExternalPointerSize > kTaggedSize;
+ ExternalPointer_t encoded_value = static_cast<ExternalPointer_t>(value);
+ if (v8_pointer_compression_unaligned) {
+ base::WriteUnalignedValue<ExternalPointer_t>(field_address, encoded_value);
+ } else {
+ base::Memory<ExternalPointer_t>(field_address) = encoded_value;
+ }
+#endif // V8_HEAP_SANDBOX
+}
+
+V8_INLINE Address ReadExternalPointerField(Address field_address,
+ IsolateRoot isolate_root,
+ ExternalPointerTag tag) {
+ // Pointer compression causes types larger than kTaggedSize to be unaligned.
+ constexpr bool v8_pointer_compression_unaligned =
+ kExternalPointerSize > kTaggedSize;
+ ExternalPointer_t encoded_value;
+ if (v8_pointer_compression_unaligned) {
+ encoded_value = base::ReadUnalignedValue<ExternalPointer_t>(field_address);
+ } else {
+ encoded_value = base::Memory<ExternalPointer_t>(field_address);
+ }
+ return DecodeExternalPointer(isolate_root, encoded_value, tag);
+}
+
+V8_INLINE void WriteExternalPointerField(Address field_address,
+ Isolate* isolate, Address value,
+ ExternalPointerTag tag) {
+#ifdef V8_HEAP_SANDBOX
+ static_assert(kExternalPointerSize == kSystemPointerSize,
+ "Review the code below, once kExternalPointerSize is 4-byte "
+ "the address of the field will always be aligned");
+
+ ExternalPointer_t index =
+ base::ReadUnalignedValue<ExternalPointer_t>(field_address);
+ isolate->external_pointer_table().set(static_cast<uint32_t>(index),
+ value ^ tag);
+#else
+ // Pointer compression causes types larger than kTaggedSize to be unaligned.
+ constexpr bool v8_pointer_compression_unaligned =
+ kExternalPointerSize > kTaggedSize;
+ ExternalPointer_t encoded_value = static_cast<ExternalPointer_t>(value);
+ if (v8_pointer_compression_unaligned) {
+ base::WriteUnalignedValue<ExternalPointer_t>(field_address, encoded_value);
+ } else {
+ base::Memory<ExternalPointer_t>(field_address) = encoded_value;
+ }
+#endif // V8_HEAP_SANDBOX
}
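With V8_HEAP_SANDBOX enabled the field no longer holds a raw address; it holds an index into a per-isolate table whose entries are XOR-ed with a type tag. A standalone toy model of that indirection, not the real ExternalPointerTable API:

#include <cstdint>
#include <vector>

using Address = uintptr_t;
using ExternalPointerTag = Address;

class ToyExternalPointerTable {
 public:
  uint32_t allocate() {
    entries_.push_back(0);
    return static_cast<uint32_t>(entries_.size() - 1);
  }
  void set(uint32_t index, Address value) { entries_[index] = value; }
  Address get(uint32_t index) const { return entries_[index]; }

 private:
  std::vector<Address> entries_;
};

// The on-heap field stores only `index`; decoding looks up the entry and
// strips the tag that was XOR-ed in on write.
Address ToyDecode(const ToyExternalPointerTable& table, uint32_t index,
                  ExternalPointerTag tag) {
  return table.get(index) ^ tag;
}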
} // namespace internal
diff --git a/deps/v8/src/common/external-pointer.h b/deps/v8/src/common/external-pointer.h
index 9b5b061997..5a380df762 100644
--- a/deps/v8/src/common/external-pointer.h
+++ b/deps/v8/src/common/external-pointer.h
@@ -10,22 +10,37 @@
namespace v8 {
namespace internal {
-// See v8:10391 for details about V8 heap sandbox.
-constexpr uint32_t kExternalPointerSalt =
- 0x7fffffff & ~static_cast<uint32_t>(kHeapObjectTagMask);
-
-static_assert(static_cast<int32_t>(kExternalPointerSalt) >= 0,
- "Salt value must be positive for better assembly code");
-
-// Convert external pointer value into encoded form suitable for being stored
-// on V8 heap.
-V8_INLINE ExternalPointer_t EncodeExternalPointer(Isolate* isolate,
- Address external_pointer);
-
// Convert external pointer from on-V8-heap representation to an actual external
// pointer value.
-V8_INLINE Address DecodeExternalPointer(const Isolate* isolate,
- ExternalPointer_t encoded_pointer);
+V8_INLINE Address DecodeExternalPointer(IsolateRoot isolate,
+ ExternalPointer_t encoded_pointer,
+ ExternalPointerTag tag);
+
+constexpr ExternalPointer_t kNullExternalPointer = 0;
+
+// Creates an uninitialized entry in the external pointer table and writes the
+// entry id to the field.
+// When the sandbox is not enabled, this is a no-op.
+V8_INLINE void InitExternalPointerField(Address field_address,
+ Isolate* isolate);
+
+// Creates and initializes an entry in the external pointer table and writes
+// the entry id to the field.
+// Equivalent to InitExternalPointerField() followed by
+// WriteExternalPointerField().
+V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
+ Address value, ExternalPointerTag tag);
+
+// Reads the external pointer for the field and decodes it if the sandbox is
+// enabled.
+V8_INLINE Address ReadExternalPointerField(Address field_address,
+ IsolateRoot isolate,
+ ExternalPointerTag tag);
+
+// Encodes the value if the sandbox is enabled and writes it into the field.
+V8_INLINE void WriteExternalPointerField(Address field_address,
+ Isolate* isolate, Address value,
+ ExternalPointerTag tag);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index 0e9d815207..988ab10c15 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -103,6 +103,13 @@ STATIC_ASSERT(V8_DEFAULT_STACK_SIZE_KB* KB +
#define V8_DOUBLE_FIELDS_UNBOXING false
#endif
+// Determine whether dict mode prototypes feature is enabled.
+#ifdef V8_DICT_MODE_PROTOTYPES
+#define V8_DICT_MODE_PROTOTYPES_BOOL true
+#else
+#define V8_DICT_MODE_PROTOTYPES_BOOL false
+#endif
+
// Determine whether tagged pointers are 8 bytes (used in Torque layouts for
// choosing where to insert padding).
#if V8_TARGET_ARCH_64_BIT && !defined(V8_COMPRESS_POINTERS)
@@ -189,9 +196,8 @@ constexpr int kDoubleSizeLog2 = 3;
// Total wasm code space per engine (i.e. per process) is limited to make
// certain attacks that rely on heap spraying harder.
-// This limit was increased to 2GB in August 2020 and we have security clearance
-// to increase to 4GB if needed.
-constexpr size_t kMaxWasmCodeMB = 2048;
+// Just below 4GB, such that {kMaxWasmCodeMemory} fits in a 32-bit size_t.
+constexpr size_t kMaxWasmCodeMB = 4095;
constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB;
#if V8_HOST_ARCH_64_BIT
@@ -295,7 +301,6 @@ STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
// This type defines raw storage type for external (or off-V8 heap) pointers
// stored on V8 heap.
-using ExternalPointer_t = Address;
constexpr int kExternalPointerSize = sizeof(ExternalPointer_t);
constexpr int kEmbedderDataSlotSize = kSystemPointerSize;
@@ -465,8 +470,11 @@ enum class DeoptimizeKind : uint8_t {
kSoft,
kBailout,
kLazy,
- kLastDeoptimizeKind = kLazy
};
+constexpr DeoptimizeKind kFirstDeoptimizeKind = DeoptimizeKind::kEager;
+constexpr DeoptimizeKind kLastDeoptimizeKind = DeoptimizeKind::kLazy;
+STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
+constexpr int kDeoptimizeKindCount = static_cast<int>(kLastDeoptimizeKind) + 1;
inline size_t hash_value(DeoptimizeKind kind) {
return static_cast<size_t>(kind);
}
@@ -481,23 +489,8 @@ inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
case DeoptimizeKind::kBailout:
return os << "Bailout";
}
- UNREACHABLE();
}
-enum class IsolateAllocationMode {
- // Allocate Isolate in C++ heap using default new/delete operators.
- kInCppHeap,
-
- // Allocate Isolate in a committed region inside V8 heap reservation.
- kInV8Heap,
-
-#ifdef V8_COMPRESS_POINTERS
- kDefault = kInV8Heap,
-#else
- kDefault = kInCppHeap,
-#endif
-};
-
// Indicates whether the lookup is related to sloppy-mode block-scoped
// function hoisting, and is a synthetic assignment for that.
enum class LookupHoistingMode { kNormal, kLegacySloppy };
@@ -795,12 +788,7 @@ inline std::ostream& operator<<(std::ostream& os, AllocationType kind) {
}
// TODO(ishell): review and rename kWordAligned to kTaggedAligned.
-enum AllocationAlignment {
- kWordAligned,
- kDoubleAligned,
- kDoubleUnaligned,
- kCodeAligned
-};
+enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
enum class AccessMode { ATOMIC, NON_ATOMIC };
@@ -899,15 +887,7 @@ enum ShouldThrow {
kDontThrow = Internals::kDontThrow
};
-// The Store Buffer (GC).
-enum StoreBufferEvent {
- kStoreBufferFullEvent,
- kStoreBufferStartScanningPagesEvent,
- kStoreBufferScanningPageEvent
-};
-
-using StoreBufferCallback = void (*)(Heap* heap, MemoryChunk* page,
- StoreBufferEvent event);
+enum class ThreadKind { kMain, kBackground };
// Union used for customized checking of the IEEE double types
// inlined within v8 runtime, rather than going to the underlying
@@ -1426,22 +1406,21 @@ enum class Operation {
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
// kNone -> kEnumCacheKeysAndIndices -> kEnumCacheKeys -> kAny
-class ForInFeedback {
- public:
- enum {
- kNone = 0x0,
- kEnumCacheKeysAndIndices = 0x1,
- kEnumCacheKeys = 0x3,
- kAny = 0x7
- };
+enum class ForInFeedback : uint8_t {
+ kNone = 0x0,
+ kEnumCacheKeysAndIndices = 0x1,
+ kEnumCacheKeys = 0x3,
+ kAny = 0x7
};
-STATIC_ASSERT((ForInFeedback::kNone |
- ForInFeedback::kEnumCacheKeysAndIndices) ==
- ForInFeedback::kEnumCacheKeysAndIndices);
-STATIC_ASSERT((ForInFeedback::kEnumCacheKeysAndIndices |
- ForInFeedback::kEnumCacheKeys) == ForInFeedback::kEnumCacheKeys);
-STATIC_ASSERT((ForInFeedback::kEnumCacheKeys | ForInFeedback::kAny) ==
- ForInFeedback::kAny);
+STATIC_ASSERT((static_cast<int>(ForInFeedback::kNone) |
+ static_cast<int>(ForInFeedback::kEnumCacheKeysAndIndices)) ==
+ static_cast<int>(ForInFeedback::kEnumCacheKeysAndIndices));
+STATIC_ASSERT((static_cast<int>(ForInFeedback::kEnumCacheKeysAndIndices) |
+ static_cast<int>(ForInFeedback::kEnumCacheKeys)) ==
+ static_cast<int>(ForInFeedback::kEnumCacheKeys));
+STATIC_ASSERT((static_cast<int>(ForInFeedback::kEnumCacheKeys) |
+ static_cast<int>(ForInFeedback::kAny)) ==
+ static_cast<int>(ForInFeedback::kAny));
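As the comment above notes, feedback only moves toward more generic values, and combining is an OR over the lattice encodings. A minimal sketch of such a combine step; the helper name is an assumption:

#include <cstdint>

enum class ForInFeedback : uint8_t {  // same encoding as above
  kNone = 0x0,
  kEnumCacheKeysAndIndices = 0x1,
  kEnumCacheKeys = 0x3,
  kAny = 0x7
};

constexpr ForInFeedback Combine(ForInFeedback a, ForInFeedback b) {
  return static_cast<ForInFeedback>(static_cast<uint8_t>(a) |
                                    static_cast<uint8_t>(b));
}

// Combining keys-and-indices feedback with keys-only feedback widens to keys.
static_assert(Combine(ForInFeedback::kEnumCacheKeysAndIndices,
                      ForInFeedback::kEnumCacheKeys) ==
                  ForInFeedback::kEnumCacheKeys,
              "OR moves to the more generic type");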
enum class UnicodeEncoding : uint8_t {
// Different unicode encodings in a |word32|:
@@ -1530,13 +1509,31 @@ inline std::ostream& operator<<(std::ostream& os,
using FileAndLine = std::pair<const char*, int>;
-enum class OptimizationMarker {
- kLogFirstExecution,
- kNone,
- kCompileOptimized,
- kCompileOptimizedConcurrent,
- kInOptimizationQueue
+enum OptimizationMarker : int32_t {
+  // These values are set so that it is easy to check if there is a marker for
+  // which some processing needs to be done.
+ kNone = 0b000,
+ kInOptimizationQueue = 0b001,
+ kCompileOptimized = 0b010,
+ kCompileOptimizedConcurrent = 0b011,
+ kLogFirstExecution = 0b100,
+ kLastOptimizationMarker = kLogFirstExecution
};
+// For kNone or kInOptimizationQueue we don't need any special processing.
+// To check both cases using a single mask, we expect kNone to be 0 and
+// kInOptimizationQueue to be 1 so that we can mask off the lsb when checking.
+STATIC_ASSERT(kNone == 0b000 && kInOptimizationQueue == 0b001);
+STATIC_ASSERT(kLastOptimizationMarker <= 0b111);
+static constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b110;
+
+inline bool IsInOptimizationQueueMarker(OptimizationMarker marker) {
+ return marker == OptimizationMarker::kInOptimizationQueue;
+}
+
+inline bool IsCompileOptimizedMarker(OptimizationMarker marker) {
+ return marker == OptimizationMarker::kCompileOptimized ||
+ marker == OptimizationMarker::kCompileOptimizedConcurrent;
+}
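The mask works because kNone (0b000) and kInOptimizationQueue (0b001) differ only in the least significant bit. Given the enum and mask defined above, the single-mask check can look like this; the helper name is an assumption:

// True only for markers that actually require processing, i.e. anything other
// than kNone or kInOptimizationQueue, which the mask maps to zero.
inline bool MarkerNeedsProcessing(OptimizationMarker marker) {
  return (marker & kNoneOrInOptimizationQueueMask) != 0;
}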
inline std::ostream& operator<<(std::ostream& os,
const OptimizationMarker& marker) {
@@ -1552,8 +1549,27 @@ inline std::ostream& operator<<(std::ostream& os,
case OptimizationMarker::kInOptimizationQueue:
return os << "OptimizationMarker::kInOptimizationQueue";
}
- UNREACHABLE();
- return os;
+}
+
+enum class OptimizationTier {
+ kNone = 0b00,
+ kMidTier = 0b01,
+ kTopTier = 0b10,
+ kLastOptimizationTier = kTopTier
+};
+static constexpr uint32_t kNoneOrMidTierMask = 0b10;
+static constexpr uint32_t kNoneMask = 0b11;
+
+inline std::ostream& operator<<(std::ostream& os,
+ const OptimizationTier& tier) {
+ switch (tier) {
+ case OptimizationTier::kNone:
+ return os << "OptimizationTier::kNone";
+ case OptimizationTier::kMidTier:
+ return os << "OptimizationTier::kMidTier";
+ case OptimizationTier::kTopTier:
+ return os << "OptimizationTier::kTopTier";
+ }
}
enum class SpeculationMode { kAllowSpeculation, kDisallowSpeculation };
@@ -1618,7 +1634,6 @@ enum class LoadSensitivity {
V(TrapDivUnrepresentable) \
V(TrapRemByZero) \
V(TrapFloatUnrepresentable) \
- V(TrapFuncInvalid) \
V(TrapFuncSigMismatch) \
V(TrapDataSegmentDropped) \
V(TrapElemSegmentDropped) \
@@ -1627,7 +1642,6 @@ enum class LoadSensitivity {
V(TrapRethrowNull) \
V(TrapNullDereference) \
V(TrapIllegalCast) \
- V(TrapWasmJSFunction) \
V(TrapArrayOutOfBounds)
enum KeyedAccessLoadMode {
@@ -1695,7 +1709,67 @@ enum class TraceRetainingPathMode { kEnabled, kDisabled };
// can be used in Torque.
enum class VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
+enum class DynamicMapChecksStatus : uint8_t {
+ kSuccess = 0,
+ kBailout = 1,
+ kDeopt = 2
+};
+
+#ifdef V8_COMPRESS_POINTERS
+class IsolateRoot {
+ public:
+ explicit constexpr IsolateRoot(Address address) : address_(address) {}
+ // NOLINTNEXTLINE
+ inline IsolateRoot(const Isolate* isolate);
+ // NOLINTNEXTLINE
+ inline IsolateRoot(const LocalIsolate* isolate);
+
+ inline Address address() const;
+
+ private:
+ Address address_;
+};
+#else
+class IsolateRoot {
+ public:
+ IsolateRoot() = default;
+ // NOLINTNEXTLINE
+ IsolateRoot(const Isolate* isolate) {}
+ // NOLINTNEXTLINE
+ IsolateRoot(const LocalIsolate* isolate) {}
+};
+#endif
+
+class int31_t {
+ public:
+ constexpr int31_t() : value_(0) {}
+ constexpr int31_t(int value) : value_(value) { // NOLINT(runtime/explicit)
+ DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
+ }
+ int31_t& operator=(int value) {
+ DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
+ value_ = value;
+ return *this;
+ }
+ int32_t value() const { return value_; }
+ operator int32_t() const { return value_; }
+
+ private:
+ int32_t value_;
+};
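The DCHECKs in int31_t require bit 31 to equal bit 30, i.e. the stored value must be representable in 31 bits. A standalone predicate expressing the same condition:

#include <cstdint>

// Matches the DCHECK: the value sign-extends from 31 bits.
constexpr bool FitsInInt31(int32_t v) {
  return ((v & 0x80000000) != 0) == ((v & 0x40000000) != 0);
}

static_assert(FitsInInt31(0x3FFFFFFF), "largest positive int31");
static_assert(!FitsInInt31(0x40000000), "needs a 32nd bit");
static_assert(FitsInInt31(-1), "small negative values are fine");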
+
} // namespace internal
+
+// Tag dispatching support for acquire/release and relaxed loads and stores.
+struct AcquireLoadTag {};
+struct RelaxedLoadTag {};
+struct ReleaseStoreTag {};
+struct RelaxedStoreTag {};
+static constexpr AcquireLoadTag kAcquireLoad;
+static constexpr RelaxedLoadTag kRelaxedLoad;
+static constexpr ReleaseStoreTag kReleaseStore;
+static constexpr RelaxedStoreTag kRelaxedStore;
+
} // namespace v8
namespace i = v8::internal;
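The tag types above let accessors be overloaded on the required memory ordering, which is how calls such as map->instance_descriptors(kAcquireLoad) later in this diff select the right variant. A standalone sketch of the idiom:

#include <atomic>

struct AcquireLoadTag {};
struct RelaxedLoadTag {};
static constexpr AcquireLoadTag kAcquireLoad;
static constexpr RelaxedLoadTag kRelaxedLoad;

class Cell {
 public:
  int value(AcquireLoadTag) const { return v_.load(std::memory_order_acquire); }
  int value(RelaxedLoadTag) const { return v_.load(std::memory_order_relaxed); }

 private:
  std::atomic<int> v_{0};
};

// Usage: cell.value(kAcquireLoad) when publication order matters,
// cell.value(kRelaxedLoad) when it does not.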
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index dc4d7581f1..c8ff902642 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -92,6 +92,7 @@ namespace internal {
T(IllegalInvocation, "Illegal invocation") \
T(ImmutablePrototypeSet, \
"Immutable prototype object '%' cannot have their prototype set") \
+ T(ImportAssertionDuplicateKey, "Import assertion has duplicate key '%'") \
T(ImportCallNotNewExpression, "Cannot use new with import") \
T(ImportOutsideModule, "Cannot use import statement outside a module") \
T(ImportMetaOutsideModule, "Cannot use 'import.meta' outside a module") \
@@ -364,6 +365,8 @@ namespace internal {
T(ToRadixFormatRange, "toString() radix argument must be between 2 and 36") \
T(TypedArraySetOffsetOutOfBounds, "offset is out of bounds") \
T(TypedArraySetSourceTooLarge, "Source is too large") \
+ T(TypedArrayTooLargeToSort, \
+ "Custom comparefn not supported for huge TypedArrays") \
T(ValueOutOfRange, "Value % out of range for % options property %") \
/* SyntaxError */ \
T(AmbiguousExport, \
@@ -392,6 +395,8 @@ namespace internal {
"Async functions can only be declared at the top level or inside a " \
"block.") \
T(IllegalBreak, "Illegal break statement") \
+ T(ModuleExportNameWithoutFromClause, \
+ "String literal module export names must be followed by a 'from' clause") \
T(NoIterationStatement, \
"Illegal continue statement: no surrounding iteration statement") \
T(IllegalContinue, \
@@ -416,6 +421,8 @@ namespace internal {
"Invalid left-hand side expression in postfix operation") \
T(InvalidLhsInPrefixOp, \
"Invalid left-hand side expression in prefix operation") \
+ T(InvalidModuleExportName, \
+ "Invalid module export name: contains unpaired surrogate") \
T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
T(InvalidOrUnexpectedToken, "Invalid or unexpected token") \
T(InvalidPrivateBrand, "Object must be an instance of class %") \
@@ -551,19 +558,17 @@ namespace internal {
T(WasmTrapDivUnrepresentable, "divide result unrepresentable") \
T(WasmTrapRemByZero, "remainder by zero") \
T(WasmTrapFloatUnrepresentable, "float unrepresentable in integer range") \
- T(WasmTrapFuncInvalid, "invalid index into function table") \
+ T(WasmTrapTableOutOfBounds, "table index is out of bounds") \
T(WasmTrapFuncSigMismatch, "function signature mismatch") \
T(WasmTrapMultiReturnLengthMismatch, "multi-return length mismatch") \
- T(WasmTrapTypeError, "wasm function signature contains illegal type") \
+ T(WasmTrapJSTypeError, "type incompatibility when transforming from/to JS") \
T(WasmTrapDataSegmentDropped, "data segment has been dropped") \
T(WasmTrapElemSegmentDropped, "element segment has been dropped") \
- T(WasmTrapTableOutOfBounds, "table access out of bounds") \
T(WasmTrapBrOnExnNull, "br_on_exn on null value") \
T(WasmTrapRethrowNull, "rethrowing null value") \
T(WasmTrapNullDereference, "dereferencing a null pointer") \
T(WasmTrapIllegalCast, "illegal cast") \
T(WasmTrapArrayOutOfBounds, "array element access out of bounds") \
- T(WasmTrapWasmJSFunction, "cannot call WebAssembly.Function with call_ref") \
T(WasmExceptionError, "wasm exception") \
/* Asm.js validation related */ \
T(AsmJsInvalid, "Invalid asm.js: %") \
diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h
index ad0b17ff5e..f74c4d82c9 100644
--- a/deps/v8/src/common/ptr-compr-inl.h
+++ b/deps/v8/src/common/ptr-compr-inl.h
@@ -8,32 +8,37 @@
#include "include/v8-internal.h"
#include "src/common/ptr-compr.h"
#include "src/execution/isolate.h"
+#include "src/execution/local-isolate-inl.h"
namespace v8 {
namespace internal {
-#if V8_TARGET_ARCH_64_BIT
+#ifdef V8_COMPRESS_POINTERS
+
+IsolateRoot::IsolateRoot(const Isolate* isolate)
+ : address_(isolate->isolate_root()) {}
+IsolateRoot::IsolateRoot(const LocalIsolate* isolate)
+ : address_(isolate->isolate_root()) {}
+
+Address IsolateRoot::address() const {
+ Address ret = address_;
+ ret = reinterpret_cast<Address>(V8_ASSUME_ALIGNED(
+ reinterpret_cast<void*>(ret), kPtrComprIsolateRootAlignment));
+ return ret;
+}
+
// Compresses full-pointer representation of a tagged value to on-heap
// representation.
V8_INLINE Tagged_t CompressTagged(Address tagged) {
return static_cast<Tagged_t>(static_cast<uint32_t>(tagged));
}
-V8_INLINE Address GetIsolateRoot(Address on_heap_addr) {
- // We subtract 1 here in order to let the compiler generate addition of 32-bit
- // signed constant instead of 64-bit constant (the problem is that 2Gb looks
- // like a negative 32-bit value). It's correct because we will never use
- // leftmost address of V8 heap as |on_heap_addr|.
+V8_INLINE constexpr Address GetIsolateRootAddress(Address on_heap_addr) {
return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr);
}
-V8_INLINE Address GetIsolateRoot(const Isolate* isolate) {
- Address isolate_root = isolate->isolate_root();
-#ifdef V8_COMPRESS_POINTERS
- isolate_root = reinterpret_cast<Address>(V8_ASSUME_ALIGNED(
- reinterpret_cast<void*>(isolate_root), kPtrComprIsolateRootAlignment));
-#endif
- return isolate_root;
+V8_INLINE Address GetIsolateRootAddress(IsolateRoot isolate) {
+ return isolate.address();
}
// Decompresses smi value.
@@ -47,7 +52,7 @@ V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) {
template <typename TOnHeapAddress>
V8_INLINE Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
- return GetIsolateRoot(on_heap_addr) + static_cast<Address>(raw_value);
+ return GetIsolateRootAddress(on_heap_addr) + static_cast<Address>(raw_value);
}
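Compression keeps only the low 32 bits of a tagged address, and decompression adds them back onto the 4GB-aligned isolate root. A standalone model of the round trip; the root constant is illustrative:

#include <cstdint>

using Address = uint64_t;
using Tagged_t = uint32_t;

constexpr Address kToyIsolateRoot = Address{0x2A} << 32;  // 4GB-aligned cage base

constexpr Tagged_t CompressTaggedModel(Address full) {
  return static_cast<Tagged_t>(full);  // keep the low 32 bits
}

constexpr Address DecompressTaggedModel(Address isolate_root, Tagged_t raw) {
  return isolate_root + static_cast<Address>(raw);  // re-base inside the cage
}

static_assert(
    DecompressTaggedModel(kToyIsolateRoot,
                          CompressTaggedModel(kToyIsolateRoot + 0x1234)) ==
        kToyIsolateRoot + 0x1234,
    "round trip within the same 4GB cage");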
// Decompresses any tagged value, preserving both weak- and smi- tags.
@@ -57,22 +62,18 @@ V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
return DecompressTaggedPointer(on_heap_addr, raw_value);
}
-#ifdef V8_COMPRESS_POINTERS
-
STATIC_ASSERT(kPtrComprHeapReservationSize ==
Internals::kPtrComprHeapReservationSize);
STATIC_ASSERT(kPtrComprIsolateRootAlignment ==
Internals::kPtrComprIsolateRootAlignment);
-#endif // V8_COMPRESS_POINTERS
-
#else
V8_INLINE Tagged_t CompressTagged(Address tagged) { UNREACHABLE(); }
-V8_INLINE Address GetIsolateRoot(Address on_heap_addr) { UNREACHABLE(); }
+V8_INLINE Address GetIsolateRootAddress(Address on_heap_addr) { UNREACHABLE(); }
-V8_INLINE Address GetIsolateRoot(const Isolate* isolate) { UNREACHABLE(); }
+V8_INLINE Address GetIsolateRootAddress(IsolateRoot isolate) { UNREACHABLE(); }
V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) { UNREACHABLE(); }
@@ -88,7 +89,7 @@ V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
UNREACHABLE();
}
-#endif // V8_TARGET_ARCH_64_BIT
+#endif // V8_COMPRESS_POINTERS
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/common/ptr-compr.h b/deps/v8/src/common/ptr-compr.h
index 105d5f1a4f..0c82c2328c 100644
--- a/deps/v8/src/common/ptr-compr.h
+++ b/deps/v8/src/common/ptr-compr.h
@@ -7,7 +7,7 @@
#include "src/common/globals.h"
-#if V8_TARGET_ARCH_64_BIT
+#ifdef V8_COMPRESS_POINTERS
namespace v8 {
namespace internal {
@@ -19,6 +19,6 @@ constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB;
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_64_BIT
+#endif // V8_COMPRESS_POINTERS
#endif // V8_COMMON_PTR_COMPR_H_
diff --git a/deps/v8/src/compiler-dispatcher/DIR_METADATA b/deps/v8/src/compiler-dispatcher/DIR_METADATA
new file mode 100644
index 0000000000..fc018666b1
--- /dev/null
+++ b/deps/v8/src/compiler-dispatcher/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Compiler"
+}
\ No newline at end of file
diff --git a/deps/v8/src/compiler-dispatcher/OWNERS b/deps/v8/src/compiler-dispatcher/OWNERS
index b71c01a305..7bc22f1662 100644
--- a/deps/v8/src/compiler-dispatcher/OWNERS
+++ b/deps/v8/src/compiler-dispatcher/OWNERS
@@ -2,5 +2,3 @@ ahaas@chromium.org
jkummerow@chromium.org
leszeks@chromium.org
rmcilroy@chromium.org
-
-# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index c20a38a7e4..a22c79e0ad 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -114,7 +114,7 @@ void CompilerDispatcher::RegisterSharedFunctionInfo(
auto job_it = jobs_.find(job_id);
DCHECK_NE(job_it, jobs_.end());
Job* job = job_it->second.get();
- shared_to_unoptimized_job_id_.Set(function_handle, job_id);
+ shared_to_unoptimized_job_id_.Insert(function_handle, job_id);
{
base::MutexGuard lock(&mutex_);
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 528a9babe3..931a9e197b 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -8,6 +8,8 @@
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/execution/isolate.h"
+#include "src/execution/local-isolate.h"
+#include "src/heap/local-heap.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
@@ -56,6 +58,7 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
private:
// v8::Task overrides.
void RunInternal() override {
+ LocalIsolate local_isolate(isolate_, ThreadKind::kBackground);
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
@@ -76,8 +79,8 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
dispatcher_->recompilation_delay_));
}
- dispatcher_->CompileNext(dispatcher_->NextInput(true),
- runtime_call_stats_scope.Get());
+ dispatcher_->CompileNext(dispatcher_->NextInput(&local_isolate, true),
+ runtime_call_stats_scope.Get(), &local_isolate);
}
{
base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
@@ -106,7 +109,7 @@ OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
}
OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
- bool check_if_flushing) {
+ LocalIsolate* local_isolate, bool check_if_flushing) {
base::MutexGuard access_input_queue_(&input_queue_mutex_);
if (input_queue_length_ == 0) return nullptr;
OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
@@ -115,6 +118,7 @@ OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
input_queue_length_--;
if (check_if_flushing) {
if (mode_ == FLUSH) {
+ UnparkedScope scope(local_isolate->heap());
AllowHandleDereference allow_handle_dereference;
DisposeCompilationJob(job, true);
return nullptr;
@@ -124,11 +128,12 @@ OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
}
void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job,
- RuntimeCallStats* stats) {
+ RuntimeCallStats* stats,
+ LocalIsolate* local_isolate) {
if (!job) return;
// The function may have already been optimized by OSR. Simply continue.
- CompilationJob::Status status = job->ExecuteJob(stats);
+ CompilationJob::Status status = job->ExecuteJob(stats, local_isolate);
USE(status); // Prevent an unused-variable error.
{
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 51803822d1..36f285d163 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -18,6 +18,7 @@
namespace v8 {
namespace internal {
+class LocalHeap;
class OptimizedCompilationJob;
class RuntimeCallStats;
class SharedFunctionInfo;
@@ -58,8 +59,10 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
enum ModeFlag { COMPILE, FLUSH };
void FlushOutputQueue(bool restore_function_code);
- void CompileNext(OptimizedCompilationJob* job, RuntimeCallStats* stats);
- OptimizedCompilationJob* NextInput(bool check_if_flushing = false);
+ void CompileNext(OptimizedCompilationJob* job, RuntimeCallStats* stats,
+ LocalIsolate* local_isolate);
+ OptimizedCompilationJob* NextInput(LocalIsolate* local_isolate,
+ bool check_if_flushing = false);
inline int InputQueueIndex(int i) {
int result = (i + input_queue_shift_) % input_queue_capacity_;
diff --git a/deps/v8/src/compiler/DIR_METADATA b/deps/v8/src/compiler/DIR_METADATA
new file mode 100644
index 0000000000..fc018666b1
--- /dev/null
+++ b/deps/v8/src/compiler/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Compiler"
+}
\ No newline at end of file
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 9fd19af803..afc8551ae0 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -7,6 +7,7 @@ mvstanton@chromium.org
mslekova@chromium.org
jgruber@chromium.org
nicohartmann@chromium.org
+solanes@chromium.org
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
@@ -20,5 +21,3 @@ per-file int64-lowering.*=ahaas@chromium.org
per-file simd-scalar-lowering.*=bbudge@chromium.org
per-file simd-scalar-lowering.*=gdeepti@chromium.org
per-file simd-scalar-lowering.*=zhin@chromium.org
-
-# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index f9d15264e6..ccb8772c4e 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -17,7 +17,6 @@
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/source-text-module.h"
-#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
@@ -425,20 +424,34 @@ FieldAccess AccessBuilder::ForJSTypedArrayExternalPointer() {
: Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
- LoadSensitivity::kCritical};
+ LoadSensitivity::kCritical,
+ ConstFieldInfo::None(),
+ false,
+#ifdef V8_HEAP_SANDBOX
+ kTypedArrayExternalPointerTag
+#endif
+ };
return access;
}
// static
FieldAccess AccessBuilder::ForJSDataViewDataPointer() {
- FieldAccess access = {kTaggedBase,
- JSDataView::kDataPointerOffset,
- MaybeHandle<Name>(),
- MaybeHandle<Map>(),
- V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
- : Type::ExternalPointer(),
- MachineType::Pointer(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase,
+ JSDataView::kDataPointerOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
+ : Type::ExternalPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier,
+ LoadSensitivity::kUnsafe,
+ ConstFieldInfo::None(),
+ false,
+#ifdef V8_HEAP_SANDBOX
+ kDataViewDataPointerTag,
+#endif
+ };
return access;
}
@@ -734,14 +747,22 @@ FieldAccess AccessBuilder::ForSlicedStringParent() {
// static
FieldAccess AccessBuilder::ForExternalStringResourceData() {
- FieldAccess access = {kTaggedBase,
- ExternalString::kResourceDataOffset,
- Handle<Name>(),
- MaybeHandle<Map>(),
- V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
- : Type::ExternalPointer(),
- MachineType::Pointer(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase,
+ ExternalString::kResourceDataOffset,
+ Handle<Name>(),
+ MaybeHandle<Map>(),
+ V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
+ : Type::ExternalPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier,
+ LoadSensitivity::kUnsafe,
+ ConstFieldInfo::None(),
+ false,
+#ifdef V8_HEAP_SANDBOX
+ kExternalStringResourceTag,
+#endif
+ };
return access;
}
@@ -1239,21 +1260,32 @@ FieldAccess AccessBuilder::ForFeedbackCellInterruptBudget() {
}
// static
-FieldAccess AccessBuilder::ForFeedbackVectorClosureFeedbackCellArray() {
+FieldAccess AccessBuilder::ForFeedbackVectorInvocationCount() {
+ FieldAccess access = {kTaggedBase,
+ FeedbackVector::kInvocationCountOffset,
+ Handle<Name>(),
+ MaybeHandle<Map>(),
+ TypeCache::Get()->kInt32,
+ MachineType::Int32(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForFeedbackVectorFlags() {
FieldAccess access = {
- kTaggedBase, FeedbackVector::kClosureFeedbackCellArrayOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::TaggedPointer(),
- kFullWriteBarrier};
+ kTaggedBase, FeedbackVector::kFlagsOffset, Handle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kUint32, MachineType::Uint32(),
+ kNoWriteBarrier};
return access;
}
// static
-FieldAccess AccessBuilder::ForFeedbackVectorOptimizedCodeWeakOrSmi() {
+FieldAccess AccessBuilder::ForFeedbackVectorClosureFeedbackCellArray() {
FieldAccess access = {
- kTaggedBase, FeedbackVector::kOptimizedCodeWeakOrSmiOffset,
+ kTaggedBase, FeedbackVector::kClosureFeedbackCellArrayOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
+ Type::Any(), MachineType::TaggedPointer(),
kFullWriteBarrier};
return access;
}
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index af5882988d..ce1e51ff23 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -344,8 +344,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForFeedbackCellInterruptBudget();
// Provides access to a FeedbackVector fields.
+ static FieldAccess ForFeedbackVectorInvocationCount();
+ static FieldAccess ForFeedbackVectorFlags();
static FieldAccess ForFeedbackVectorClosureFeedbackCellArray();
- static FieldAccess ForFeedbackVectorOptimizedCodeWeakOrSmi();
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 046927e943..ddf742e708 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -70,12 +70,13 @@ std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
UNREACHABLE();
}
-ElementAccessInfo::ElementAccessInfo(ZoneVector<Handle<Map>>&& receiver_maps,
- ElementsKind elements_kind, Zone* zone)
+ElementAccessInfo::ElementAccessInfo(
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps,
+ ElementsKind elements_kind, Zone* zone)
: elements_kind_(elements_kind),
- receiver_maps_(receiver_maps),
+ lookup_start_object_maps_(lookup_start_object_maps),
transition_sources_(zone) {
- CHECK(!receiver_maps.empty());
+ CHECK(!lookup_start_object_maps.empty());
}
// static
@@ -158,27 +159,26 @@ MinimorphicLoadPropertyAccessInfo MinimorphicLoadPropertyAccessInfo::Invalid() {
PropertyAccessInfo::PropertyAccessInfo(Zone* zone)
: kind_(kInvalid),
- receiver_maps_(zone),
+ lookup_start_object_maps_(zone),
unrecorded_dependencies_(zone),
field_representation_(Representation::None()),
field_type_(Type::None()) {}
-PropertyAccessInfo::PropertyAccessInfo(Zone* zone, Kind kind,
- MaybeHandle<JSObject> holder,
- ZoneVector<Handle<Map>>&& receiver_maps)
+PropertyAccessInfo::PropertyAccessInfo(
+ Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps)
: kind_(kind),
- receiver_maps_(receiver_maps),
+ lookup_start_object_maps_(lookup_start_object_maps),
unrecorded_dependencies_(zone),
holder_(holder),
field_representation_(Representation::None()),
field_type_(Type::None()) {}
-PropertyAccessInfo::PropertyAccessInfo(Zone* zone, Kind kind,
- MaybeHandle<JSObject> holder,
- Handle<Object> constant,
- ZoneVector<Handle<Map>>&& receiver_maps)
+PropertyAccessInfo::PropertyAccessInfo(
+ Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
+ Handle<Object> constant, ZoneVector<Handle<Map>>&& lookup_start_object_maps)
: kind_(kind),
- receiver_maps_(receiver_maps),
+ lookup_start_object_maps_(lookup_start_object_maps),
unrecorded_dependencies_(zone),
constant_(constant),
holder_(holder),
@@ -189,10 +189,10 @@ PropertyAccessInfo::PropertyAccessInfo(
Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
FieldIndex field_index, Representation field_representation,
Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
- ZoneVector<Handle<Map>>&& receiver_maps,
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps,
ZoneVector<CompilationDependency const*>&& unrecorded_dependencies)
: kind_(kind),
- receiver_maps_(receiver_maps),
+ lookup_start_object_maps_(lookup_start_object_maps),
unrecorded_dependencies_(std::move(unrecorded_dependencies)),
transition_map_(transition_map),
holder_(holder),
@@ -265,9 +265,10 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
}
this->field_type_ =
Type::Union(this->field_type_, that->field_type_, zone);
- this->receiver_maps_.insert(this->receiver_maps_.end(),
- that->receiver_maps_.begin(),
- that->receiver_maps_.end());
+ this->lookup_start_object_maps_.insert(
+ this->lookup_start_object_maps_.end(),
+ that->lookup_start_object_maps_.begin(),
+ that->lookup_start_object_maps_.end());
this->unrecorded_dependencies_.insert(
this->unrecorded_dependencies_.end(),
that->unrecorded_dependencies_.begin(),
@@ -282,9 +283,10 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
if (this->constant_.address() == that->constant_.address()) {
DCHECK(this->unrecorded_dependencies_.empty());
DCHECK(that->unrecorded_dependencies_.empty());
- this->receiver_maps_.insert(this->receiver_maps_.end(),
- that->receiver_maps_.begin(),
- that->receiver_maps_.end());
+ this->lookup_start_object_maps_.insert(
+ this->lookup_start_object_maps_.end(),
+ that->lookup_start_object_maps_.begin(),
+ that->lookup_start_object_maps_.end());
return true;
}
return false;
@@ -294,9 +296,10 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
case kStringLength: {
DCHECK(this->unrecorded_dependencies_.empty());
DCHECK(that->unrecorded_dependencies_.empty());
- this->receiver_maps_.insert(this->receiver_maps_.end(),
- that->receiver_maps_.begin(),
- that->receiver_maps_.end());
+ this->lookup_start_object_maps_.insert(
+ this->lookup_start_object_maps_.end(),
+ that->lookup_start_object_maps_.begin(),
+ that->lookup_start_object_maps_.end());
return true;
}
case kModuleExport:
@@ -364,7 +367,8 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
Handle<Map> receiver_map, Handle<Map> map, MaybeHandle<JSObject> holder,
InternalIndex descriptor, AccessMode access_mode) const {
DCHECK(descriptor.is_found());
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate());
PropertyDetails const details = descriptors->GetDetails(descriptor);
int index = descriptors->GetFieldIndex(descriptor);
Representation details_representation = details.representation();
@@ -429,7 +433,7 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
PropertyConstness constness;
if (details.IsReadOnly() && !details.IsConfigurable()) {
constness = PropertyConstness::kConst;
- } else if (FLAG_turboprop && !map->is_prototype_map()) {
+ } else if (broker()->is_turboprop() && !map->is_prototype_map()) {
// The constness feedback is too unstable for the aggressive compilation
// of turboprop.
constness = PropertyConstness::kMutable;
@@ -459,7 +463,8 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
MaybeHandle<JSObject> holder, InternalIndex descriptor,
AccessMode access_mode) const {
DCHECK(descriptor.is_found());
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate());
SLOW_DCHECK(descriptor == descriptors->Search(*name, *map));
if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) {
DCHECK(map->is_prototype_map());
@@ -557,8 +562,8 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
MaybeHandle<JSObject> holder;
while (true) {
// Lookup the named property on the {map}.
- Handle<DescriptorArray> descriptors(
- map->synchronized_instance_descriptors(), isolate());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kAcquireLoad),
+ isolate());
InternalIndex const number =
descriptors->Search(*name, *map, broker()->is_concurrent_inlining());
if (number.is_found()) {
@@ -830,7 +835,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
Handle<Map> transition_map(transition, isolate());
InternalIndex const number = transition_map->LastAdded();
Handle<DescriptorArray> descriptors(
- transition_map->synchronized_instance_descriptors(), isolate());
+ transition_map->instance_descriptors(kAcquireLoad), isolate());
PropertyDetails const details = descriptors->GetDetails(number);
// Don't bother optimizing stores to read-only properties.
if (details.IsReadOnly()) {
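The access-info.cc changes above replace plain and synchronized_ descriptor reads with explicitly tagged instance_descriptors(kRelaxedLoad) / instance_descriptors(kAcquireLoad) calls. As a rough standalone analogy using std::atomic (not V8's tagged-load API), the two orderings differ as follows:

// Standalone sketch: relaxed vs. acquire loads, which is what the
// kRelaxedLoad/kAcquireLoad tags above select on the descriptor accessor.
#include <atomic>
#include <cassert>

struct DescriptorsSlot {
  std::atomic<int*> ptr{nullptr};

  // Relaxed load: atomic, but no ordering guarantees; fine when the caller
  // only needs *some* recent value and synchronizes elsewhere.
  int* LoadRelaxed() const { return ptr.load(std::memory_order_relaxed); }

  // Acquire load: everything the publishing thread wrote before its release
  // store is visible once this load observes the published pointer.
  int* LoadAcquire() const { return ptr.load(std::memory_order_acquire); }

  // Publisher side: the release store pairs with the acquire load.
  void Publish(int* descriptors) {
    ptr.store(descriptors, std::memory_order_release);
  }
};

int main() {
  static int descriptors[4] = {1, 2, 3, 4};
  DescriptorsSlot slot;
  slot.Publish(descriptors);
  assert(slot.LoadAcquire() == descriptors);
  assert(slot.LoadRelaxed() == descriptors);
  return 0;
}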
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 65ea6a5376..aa402fe695 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -37,25 +37,25 @@ std::ostream& operator<<(std::ostream&, AccessMode);
// This class encapsulates all information required to access a certain element.
class ElementAccessInfo final {
public:
- ElementAccessInfo(ZoneVector<Handle<Map>>&& receiver_maps,
+ ElementAccessInfo(ZoneVector<Handle<Map>>&& lookup_start_object_maps,
ElementsKind elements_kind, Zone* zone);
ElementsKind elements_kind() const { return elements_kind_; }
- ZoneVector<Handle<Map>> const& receiver_maps() const {
- return receiver_maps_;
+ ZoneVector<Handle<Map>> const& lookup_start_object_maps() const {
+ return lookup_start_object_maps_;
}
ZoneVector<Handle<Map>> const& transition_sources() const {
return transition_sources_;
}
void AddTransitionSource(Handle<Map> map) {
- CHECK_EQ(receiver_maps_.size(), 1);
+ CHECK_EQ(lookup_start_object_maps_.size(), 1);
transition_sources_.push_back(map);
}
private:
ElementsKind elements_kind_;
- ZoneVector<Handle<Map>> receiver_maps_;
+ ZoneVector<Handle<Map>> lookup_start_object_maps_;
ZoneVector<Handle<Map>> transition_sources_;
};
@@ -128,26 +128,26 @@ class PropertyAccessInfo final {
Type field_type() const { return field_type_; }
Representation field_representation() const { return field_representation_; }
MaybeHandle<Map> field_map() const { return field_map_; }
- ZoneVector<Handle<Map>> const& receiver_maps() const {
- return receiver_maps_;
+ ZoneVector<Handle<Map>> const& lookup_start_object_maps() const {
+ return lookup_start_object_maps_;
}
private:
explicit PropertyAccessInfo(Zone* zone);
PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
- ZoneVector<Handle<Map>>&& receiver_maps);
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps);
PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
Handle<Object> constant,
- ZoneVector<Handle<Map>>&& receiver_maps);
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps);
PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map, FieldIndex field_index,
Representation field_representation, Type field_type,
Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
- ZoneVector<Handle<Map>>&& receiver_maps,
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps,
ZoneVector<CompilationDependency const*>&& dependencies);
Kind kind_;
- ZoneVector<Handle<Map>> receiver_maps_;
+ ZoneVector<Handle<Map>> lookup_start_object_maps_;
ZoneVector<CompilationDependency const*> unrecorded_dependencies_;
Handle<Object> constant_;
MaybeHandle<Map> transition_map_;
@@ -258,7 +258,9 @@ class AccessInfoFactory final {
TypeCache const* const type_cache_;
Zone* const zone_;
- DISALLOW_COPY_AND_ASSIGN(AccessInfoFactory);
+ // TODO(nicohartmann@): Move to public
+ AccessInfoFactory(const AccessInfoFactory&) = delete;
+ AccessInfoFactory& operator=(const AccessInfoFactory&) = delete;
};
} // namespace compiler
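Several headers in this patch, including access-info.h above and add-type-assertions-reducer.h below, drop the DISALLOW_COPY_AND_ASSIGN macro in favour of explicitly deleted copy operations. A minimal standalone sketch of the idiom:

// Standalone sketch of the replacement idiom: explicitly deleted copy
// operations instead of a DISALLOW_COPY_AND_ASSIGN macro.
class AccessInfoFactoryLike {
 public:
  AccessInfoFactoryLike() = default;

  // Deleting the members in-class states the intent at the point of
  // declaration and gives clearer compiler diagnostics than a macro that
  // declares-but-never-defines them.
  AccessInfoFactoryLike(const AccessInfoFactoryLike&) = delete;
  AccessInfoFactoryLike& operator=(const AccessInfoFactoryLike&) = delete;
};

int main() {
  AccessInfoFactoryLike a;
  // AccessInfoFactoryLike b = a;  // would not compile: copy ctor is deleted
  (void)a;
  return 0;
}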
diff --git a/deps/v8/src/compiler/add-type-assertions-reducer.h b/deps/v8/src/compiler/add-type-assertions-reducer.h
index 36add040e1..bd8000a06f 100644
--- a/deps/v8/src/compiler/add-type-assertions-reducer.h
+++ b/deps/v8/src/compiler/add-type-assertions-reducer.h
@@ -22,6 +22,9 @@ class V8_EXPORT_PRIVATE AddTypeAssertionsReducer final
AddTypeAssertionsReducer(Editor* editor, JSGraph* jsgraph, Zone* zone);
~AddTypeAssertionsReducer() final;
+ AddTypeAssertionsReducer(const AddTypeAssertionsReducer&) = delete;
+ AddTypeAssertionsReducer& operator=(const AddTypeAssertionsReducer&) = delete;
+
const char* reducer_name() const override {
return "AddTypeAssertionsReducer";
}
@@ -34,8 +37,6 @@ class V8_EXPORT_PRIVATE AddTypeAssertionsReducer final
Graph* graph() { return jsgraph_->graph(); }
SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
-
- DISALLOW_COPY_AND_ASSIGN(AddTypeAssertionsReducer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/allocation-builder-inl.h b/deps/v8/src/compiler/allocation-builder-inl.h
index 8a9d74e071..ff1404baa7 100644
--- a/deps/v8/src/compiler/allocation-builder-inl.h
+++ b/deps/v8/src/compiler/allocation-builder-inl.h
@@ -7,9 +7,8 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/allocation-builder.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/map-inl.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/allocation-builder.h b/deps/v8/src/compiler/allocation-builder.h
index 709146950c..c9a2570493 100644
--- a/deps/v8/src/compiler/allocation-builder.h
+++ b/deps/v8/src/compiler/allocation-builder.h
@@ -27,7 +27,7 @@ class AllocationBuilder final {
// Primitive allocation of static size.
void Allocate(int size, AllocationType allocation = AllocationType::kYoung,
Type type = Type::Any()) {
- DCHECK_LE(size, kMaxRegularHeapObjectSize);
+ DCHECK_LE(size, Heap::MaxRegularHeapObjectSize(allocation));
effect_ = graph()->NewNode(
common()->BeginRegion(RegionObservability::kNotObservable), effect_);
allocation_ =
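The allocation-builder.h hunk above makes the size guard depend on the allocation type instead of a single kMaxRegularHeapObjectSize constant. A hedged standalone sketch with hypothetical limits (the real values come from Heap::MaxRegularHeapObjectSize):

// Standalone sketch (hypothetical limits, not V8's): the guard now asks for a
// per-allocation-type maximum rather than one global constant.
#include <cassert>
#include <cstddef>

enum class AllocationType { kYoung, kOld };

// Hypothetical helper standing in for Heap::MaxRegularHeapObjectSize().
constexpr size_t MaxRegularHeapObjectSize(AllocationType type) {
  // Assumption for illustration only: young-generation objects are capped
  // more tightly than old-generation ones.
  return type == AllocationType::kYoung ? 128 * 1024 : 512 * 1024;
}

void Allocate(size_t size, AllocationType type) {
  // Mirrors the DCHECK_LE in AllocationBuilder::Allocate.
  assert(size <= MaxRegularHeapObjectSize(type));
  // ... begin the allocation region, emit the allocation node, etc.
}

int main() {
  Allocate(64 * 1024, AllocationType::kYoung);
  Allocate(256 * 1024, AllocationType::kOld);
  return 0;
}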
diff --git a/deps/v8/src/compiler/backend/DIR_METADATA b/deps/v8/src/compiler/backend/DIR_METADATA
new file mode 100644
index 0000000000..fc018666b1
--- /dev/null
+++ b/deps/v8/src/compiler/backend/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Compiler"
+} \ No newline at end of file
diff --git a/deps/v8/src/compiler/backend/OWNERS b/deps/v8/src/compiler/backend/OWNERS
index d2b3198471..d55672b606 100644
--- a/deps/v8/src/compiler/backend/OWNERS
+++ b/deps/v8/src/compiler/backend/OWNERS
@@ -6,5 +6,3 @@ zhin@chromium.org
per-file register-allocator*=thibaudm@chromium.org
per-file spill-placer*=thibaudm@chromium.org
-
-# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 2c7e856239..9267cb1f0c 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -755,7 +755,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ CallCodeObject(reg);
}
@@ -797,7 +797,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ JumpCodeObject(reg);
}
@@ -825,7 +825,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Jump(reg);
unwinding_info_writer_.MarkBlockWillExit();
@@ -962,9 +962,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
- unwinding_info_writer_.MarkBlockWillExit();
+ __ b(exit->label());
break;
}
case kArchRet:
@@ -2539,7 +2537,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI16x8AddSaturateS: {
+ case kArmI16x8AddSatS: {
__ vqadd(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -2552,7 +2550,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI16x8SubSaturateS: {
+ case kArmI16x8SubSatS: {
__ vqsub(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -2611,12 +2609,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmI16x8UConvertI32x4:
ASSEMBLE_NEON_NARROWING_OP(NeonU16, NeonS16);
break;
- case kArmI16x8AddSaturateU: {
+ case kArmI16x8AddSatU: {
__ vqadd(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmI16x8SubSaturateU: {
+ case kArmI16x8SubSatU: {
__ vqsub(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -2707,7 +2705,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI8x16AddSaturateS: {
+ case kArmI8x16AddSatS: {
__ vqadd(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -2717,7 +2715,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI8x16SubSaturateS: {
+ case kArmI8x16SubSatS: {
__ vqsub(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -2765,12 +2763,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmI8x16UConvertI16x8:
ASSEMBLE_NEON_NARROWING_OP(NeonU8, NeonS8);
break;
- case kArmI8x16AddSaturateU: {
+ case kArmI8x16AddSatU: {
__ vqadd(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmI8x16SubSaturateU: {
+ case kArmI8x16SubSatU: {
__ vqsub(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -3121,8 +3119,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int scratch_s_base = scratch.code() * 4;
for (int j = 0; j < 4; j++) {
uint32_t four_lanes = i.InputUint32(2 + j);
- // Ensure byte indices are in [0, 31] so masks are never NaNs.
- four_lanes &= 0x1F1F1F1F;
+ DCHECK_EQ(0, four_lanes & (table_size == 2 ? 0xF0F0F0F0 : 0xE0E0E0E0));
__ vmov(SwVfpRegister::from_code(scratch_s_base + j),
Float32::FromBits(four_lanes));
}
@@ -3210,63 +3207,75 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
- case kArmS8x16LoadSplat: {
+ case kArmS128Load8Splat: {
__ vld1r(Neon8, NeonListOperand(i.OutputSimd128Register()),
i.NeonInputOperand(0));
break;
}
- case kArmS16x8LoadSplat: {
+ case kArmS128Load16Splat: {
__ vld1r(Neon16, NeonListOperand(i.OutputSimd128Register()),
i.NeonInputOperand(0));
break;
}
- case kArmS32x4LoadSplat: {
+ case kArmS128Load32Splat: {
__ vld1r(Neon32, NeonListOperand(i.OutputSimd128Register()),
i.NeonInputOperand(0));
break;
}
- case kArmS64x2LoadSplat: {
+ case kArmS128Load64Splat: {
Simd128Register dst = i.OutputSimd128Register();
__ vld1(Neon32, NeonListOperand(dst.low()), i.NeonInputOperand(0));
__ Move(dst.high(), dst.low());
break;
}
- case kArmI16x8Load8x8S: {
+ case kArmS128Load8x8S: {
Simd128Register dst = i.OutputSimd128Register();
__ vld1(Neon8, NeonListOperand(dst.low()), i.NeonInputOperand(0));
__ vmovl(NeonS8, dst, dst.low());
break;
}
- case kArmI16x8Load8x8U: {
+ case kArmS128Load8x8U: {
Simd128Register dst = i.OutputSimd128Register();
__ vld1(Neon8, NeonListOperand(dst.low()), i.NeonInputOperand(0));
__ vmovl(NeonU8, dst, dst.low());
break;
}
- case kArmI32x4Load16x4S: {
+ case kArmS128Load16x4S: {
Simd128Register dst = i.OutputSimd128Register();
__ vld1(Neon16, NeonListOperand(dst.low()), i.NeonInputOperand(0));
__ vmovl(NeonS16, dst, dst.low());
break;
}
- case kArmI32x4Load16x4U: {
+ case kArmS128Load16x4U: {
Simd128Register dst = i.OutputSimd128Register();
__ vld1(Neon16, NeonListOperand(dst.low()), i.NeonInputOperand(0));
__ vmovl(NeonU16, dst, dst.low());
break;
}
- case kArmI64x2Load32x2S: {
+ case kArmS128Load32x2S: {
Simd128Register dst = i.OutputSimd128Register();
__ vld1(Neon32, NeonListOperand(dst.low()), i.NeonInputOperand(0));
__ vmovl(NeonS32, dst, dst.low());
break;
}
- case kArmI64x2Load32x2U: {
+ case kArmS128Load32x2U: {
Simd128Register dst = i.OutputSimd128Register();
__ vld1(Neon32, NeonListOperand(dst.low()), i.NeonInputOperand(0));
__ vmovl(NeonU32, dst, dst.low());
break;
}
+ case kArmS128Load32Zero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vmov(dst, 0);
+ __ vld1s(Neon32, NeonListOperand(dst.low()), 0, i.NeonInputOperand(0));
+ break;
+ }
+ case kArmS128Load64Zero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vmov(dst.high(), 0);
+ __ vld1(Neon64, NeonListOperand(dst.low()), i.NeonInputOperand(0));
+ break;
+ }
case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
break;
@@ -3759,9 +3768,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
@@ -3787,38 +3795,85 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
unwinding_info_writer_.MarkBlockWillExit();
+ // We might need r3 for scratch.
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & r3.bit());
ArmOperandConverter g(this, nullptr);
+ const int parameter_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
+
+ // {additional_pop_count} is only greater than zero if {parameter_count = 0}.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_count != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (__ emit_debug_code()) {
+ __ cmp(g.ToRegister(additional_pop_count), Operand(0));
+ __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue);
+ }
+ }
+
+ Register argc_reg = r3;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // Functions with JS linkage have at least one parameter (the receiver).
+ // If {parameter_count} == 0, it means it is a builtin with
+ // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
+ // itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_count != 0;
+#else
+ const bool drop_jsargs = false;
+#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now unless they have a variable
// number of stack slot pops.
- if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
if (return_label_.is_bound()) {
__ b(&return_label_);
return;
} else {
__ bind(&return_label_);
- AssembleDeconstructFrame();
}
- } else {
- AssembleDeconstructFrame();
}
+ if (drop_jsargs) {
+ // Get the actual argument count.
+ __ ldr(argc_reg, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
}
- if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
- pop_count += g.ToConstant(pop).ToInt32();
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ __ add(argc_reg, argc_reg, Operand(1)); // Also pop the receiver.
+ if (parameter_count > 1) {
+ __ cmp(argc_reg, Operand(parameter_count));
+ __ mov(argc_reg, Operand(parameter_count), LeaveCC, lt);
+ }
+ __ Drop(argc_reg);
+ } else if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ __ Drop(parameter_count + additional_count);
+ } else if (parameter_count == 0) {
+ __ Drop(g.ToRegister(additional_pop_count));
} else {
- __ Drop(g.ToRegister(pop));
+ // {additional_pop_count} is guaranteed to be zero if {parameter_count !=
+ // 0}. Check RawMachineAssembler::PopAndReturn.
+ __ Drop(parameter_count);
}
- __ Drop(pop_count);
__ Ret();
}
void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {
+ __ CheckConstPool(true, false);
+}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
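The rewritten AssembleReturn above pops max(1 + argc, parameter_count) stack slots when V8_NO_ARGUMENTS_ADAPTOR is enabled, where argc is the dynamic argument count loaded from the frame (excluding the receiver) and parameter_count includes the receiver. A scalar sketch of that arithmetic (standalone, not V8 code):

// Standalone sketch of the argument-popping arithmetic in the rewritten
// epilogue: drop max(argc + 1, parameter_count) slots, where the +1 is the
// receiver.
#include <algorithm>
#include <cassert>

int SlotsToDrop(int dynamic_argc, int parameter_count) {
  // dynamic_argc: what the caller actually pushed (receiver excluded).
  // parameter_count: what the function formally declares (receiver included).
  return std::max(dynamic_argc + 1, parameter_count);
}

int main() {
  // Under-application: the formal parameter count wins.
  assert(SlotsToDrop(/*dynamic_argc=*/1, /*parameter_count=*/3) == 3);
  // Over-application: every pushed argument plus the receiver is popped.
  assert(SlotsToDrop(/*dynamic_argc=*/5, /*parameter_count=*/3) == 6);
  return 0;
}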
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index b3ee561e27..f4629ffec7 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -222,10 +222,10 @@ namespace compiler {
V(ArmI16x8ShrS) \
V(ArmI16x8SConvertI32x4) \
V(ArmI16x8Add) \
- V(ArmI16x8AddSaturateS) \
+ V(ArmI16x8AddSatS) \
V(ArmI16x8AddHoriz) \
V(ArmI16x8Sub) \
- V(ArmI16x8SubSaturateS) \
+ V(ArmI16x8SubSatS) \
V(ArmI16x8Mul) \
V(ArmI16x8MinS) \
V(ArmI16x8MaxS) \
@@ -238,8 +238,8 @@ namespace compiler {
V(ArmI16x8UConvertI8x16High) \
V(ArmI16x8ShrU) \
V(ArmI16x8UConvertI32x4) \
- V(ArmI16x8AddSaturateU) \
- V(ArmI16x8SubSaturateU) \
+ V(ArmI16x8AddSatU) \
+ V(ArmI16x8SubSatU) \
V(ArmI16x8MinU) \
V(ArmI16x8MaxU) \
V(ArmI16x8GtU) \
@@ -255,9 +255,9 @@ namespace compiler {
V(ArmI8x16ShrS) \
V(ArmI8x16SConvertI16x8) \
V(ArmI8x16Add) \
- V(ArmI8x16AddSaturateS) \
+ V(ArmI8x16AddSatS) \
V(ArmI8x16Sub) \
- V(ArmI8x16SubSaturateS) \
+ V(ArmI8x16SubSatS) \
V(ArmI8x16Mul) \
V(ArmI8x16MinS) \
V(ArmI8x16MaxS) \
@@ -268,8 +268,8 @@ namespace compiler {
V(ArmI8x16ExtractLaneU) \
V(ArmI8x16ShrU) \
V(ArmI8x16UConvertI16x8) \
- V(ArmI8x16AddSaturateU) \
- V(ArmI8x16SubSaturateU) \
+ V(ArmI8x16AddSatU) \
+ V(ArmI8x16SubSatU) \
V(ArmI8x16MinU) \
V(ArmI8x16MaxU) \
V(ArmI8x16GtU) \
@@ -321,16 +321,18 @@ namespace compiler {
V(ArmV16x8AllTrue) \
V(ArmV8x16AnyTrue) \
V(ArmV8x16AllTrue) \
- V(ArmS8x16LoadSplat) \
- V(ArmS16x8LoadSplat) \
- V(ArmS32x4LoadSplat) \
- V(ArmS64x2LoadSplat) \
- V(ArmI16x8Load8x8S) \
- V(ArmI16x8Load8x8U) \
- V(ArmI32x4Load16x4S) \
- V(ArmI32x4Load16x4U) \
- V(ArmI64x2Load32x2S) \
- V(ArmI64x2Load32x2U) \
+ V(ArmS128Load8Splat) \
+ V(ArmS128Load16Splat) \
+ V(ArmS128Load32Splat) \
+ V(ArmS128Load64Splat) \
+ V(ArmS128Load8x8S) \
+ V(ArmS128Load8x8U) \
+ V(ArmS128Load16x4S) \
+ V(ArmS128Load16x4U) \
+ V(ArmS128Load32x2S) \
+ V(ArmS128Load32x2U) \
+ V(ArmS128Load32Zero) \
+ V(ArmS128Load64Zero) \
V(ArmWord32AtomicPairLoad) \
V(ArmWord32AtomicPairStore) \
V(ArmWord32AtomicPairAdd) \
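The opcode list above renames the saturating arithmetic entries (AddSaturateS becomes AddSatS and so on) and the SIMD load transforms (S8x16LoadSplat becomes S128Load8Splat, plus the new S128Load32Zero/S128Load64Zero). The underlying lane operation is ordinary saturating arithmetic, sketched here in scalar C++ (standalone, not the NEON vqadd/vqsub instructions themselves):

// Standalone sketch of per-lane saturating arithmetic, the operation behind
// the renamed *AddSatS/*AddSatU/*SubSatS/*SubSatU opcodes.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>

int16_t AddSatS16(int16_t a, int16_t b) {
  int32_t wide = int32_t{a} + int32_t{b};
  wide = std::clamp<int32_t>(wide, std::numeric_limits<int16_t>::min(),
                             std::numeric_limits<int16_t>::max());
  return static_cast<int16_t>(wide);
}

uint8_t SubSatU8(uint8_t a, uint8_t b) {
  // Unsigned saturating subtraction clamps at zero instead of wrapping.
  return a > b ? static_cast<uint8_t>(a - b) : 0;
}

int main() {
  assert(AddSatS16(30000, 10000) == 32767);  // clamps instead of wrapping
  assert(SubSatU8(10, 200) == 0);            // clamps at zero
  return 0;
}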
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 6459d22a11..70fb1a7ccf 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -202,10 +202,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI16x8ShrS:
case kArmI16x8SConvertI32x4:
case kArmI16x8Add:
- case kArmI16x8AddSaturateS:
+ case kArmI16x8AddSatS:
case kArmI16x8AddHoriz:
case kArmI16x8Sub:
- case kArmI16x8SubSaturateS:
+ case kArmI16x8SubSatS:
case kArmI16x8Mul:
case kArmI16x8MinS:
case kArmI16x8MaxS:
@@ -218,8 +218,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI16x8UConvertI8x16High:
case kArmI16x8ShrU:
case kArmI16x8UConvertI32x4:
- case kArmI16x8AddSaturateU:
- case kArmI16x8SubSaturateU:
+ case kArmI16x8AddSatU:
+ case kArmI16x8SubSatU:
case kArmI16x8MinU:
case kArmI16x8MaxU:
case kArmI16x8GtU:
@@ -235,9 +235,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI8x16ShrS:
case kArmI8x16SConvertI16x8:
case kArmI8x16Add:
- case kArmI8x16AddSaturateS:
+ case kArmI8x16AddSatS:
case kArmI8x16Sub:
- case kArmI8x16SubSaturateS:
+ case kArmI8x16SubSatS:
case kArmI8x16Mul:
case kArmI8x16MinS:
case kArmI8x16MaxS:
@@ -247,8 +247,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI8x16GeS:
case kArmI8x16ExtractLaneU:
case kArmI8x16UConvertI16x8:
- case kArmI8x16AddSaturateU:
- case kArmI8x16SubSaturateU:
+ case kArmI8x16AddSatU:
+ case kArmI8x16SubSatU:
case kArmI8x16ShrU:
case kArmI8x16MinU:
case kArmI8x16MaxU:
@@ -314,16 +314,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmLdr:
case kArmPeek:
case kArmWord32AtomicPairLoad:
- case kArmS8x16LoadSplat:
- case kArmS16x8LoadSplat:
- case kArmS32x4LoadSplat:
- case kArmS64x2LoadSplat:
- case kArmI16x8Load8x8S:
- case kArmI16x8Load8x8U:
- case kArmI32x4Load16x4S:
- case kArmI32x4Load16x4U:
- case kArmI64x2Load32x2S:
- case kArmI64x2Load32x2U:
+ case kArmS128Load8Splat:
+ case kArmS128Load16Splat:
+ case kArmS128Load32Splat:
+ case kArmS128Load64Splat:
+ case kArmS128Load8x8S:
+ case kArmS128Load8x8U:
+ case kArmS128Load16x4S:
+ case kArmS128Load16x4U:
+ case kArmS128Load32x2S:
+ case kArmS128Load32x2U:
+ case kArmS128Load32Zero:
+ case kArmS128Load64Zero:
return kIsLoadOperation;
case kArmVstrF32:
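The scheduler hunk above keeps the renamed S128 load opcodes in the kIsLoadOperation bucket so they are not reordered across stores. A standalone sketch of that flag-classification pattern (simplified enums, not the real opcode set):

// Standalone sketch of the classification done by GetTargetInstructionFlags:
// load-like opcodes are tagged so the scheduler preserves their ordering
// hazards relative to stores.
#include <cassert>

enum class OpcodeLike { kS128Load8Splat, kS128Load64Zero, kAdd, kStore };

enum FlagsLike {
  kNoOpFlags = 0,
  kIsLoadOperation = 1 << 0,
  kHasSideEffect = 1 << 1
};

int GetFlags(OpcodeLike op) {
  switch (op) {
    case OpcodeLike::kS128Load8Splat:
    case OpcodeLike::kS128Load64Zero:
      return kIsLoadOperation;  // must not be hoisted above stores
    case OpcodeLike::kStore:
      return kHasSideEffect;
    case OpcodeLike::kAdd:
      return kNoOpFlags;
  }
  return kNoOpFlags;
}

int main() {
  assert(GetFlags(OpcodeLike::kS128Load8Splat) & kIsLoadOperation);
  assert(GetFlags(OpcodeLike::kAdd) == kNoOpFlags);
  return 0;
}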
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index e868a1a47a..248f76558e 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -29,8 +29,8 @@ class ArmOperandGenerator : public OperandGenerator {
bool CanBeImmediate(Node* node, InstructionCode opcode) {
Int32Matcher m(node);
- if (!m.HasValue()) return false;
- int32_t value = m.Value();
+ if (!m.HasResolvedValue()) return false;
+ int32_t value = m.ResolvedValue();
switch (ArchOpcodeField::decode(opcode)) {
case kArmAnd:
case kArmMov:
@@ -95,7 +95,7 @@ void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node, int width) {
ArmOperandGenerator g(selector);
Int32Matcher m(node->InputAt(1));
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
if (m.IsMultipleOf(width)) {
selector->EmitIdentity(node);
} else {
@@ -389,13 +389,14 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
size_t input_count = 2;
ExternalReferenceMatcher m(base);
- if (m.HasValue() && selector->CanAddressRelativeToRootsRegister(m.Value())) {
+ if (m.HasResolvedValue() &&
+ selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
Int32Matcher int_matcher(index);
- if (int_matcher.HasValue()) {
+ if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
- int_matcher.Value() +
+ int_matcher.ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
- selector->isolate(), m.Value());
+ selector->isolate(), m.ResolvedValue());
input_count = 1;
inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
opcode |= AddressingModeField::encode(kMode_Root);
@@ -502,35 +503,41 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
LoadTransformParameters params = LoadTransformParametersOf(node->op());
InstructionCode opcode = kArchNop;
switch (params.transformation) {
- case LoadTransformation::kS8x16LoadSplat:
- opcode = kArmS8x16LoadSplat;
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kArmS128Load8Splat;
break;
- case LoadTransformation::kS16x8LoadSplat:
- opcode = kArmS16x8LoadSplat;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kArmS128Load16Splat;
break;
- case LoadTransformation::kS32x4LoadSplat:
- opcode = kArmS32x4LoadSplat;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kArmS128Load32Splat;
break;
- case LoadTransformation::kS64x2LoadSplat:
- opcode = kArmS64x2LoadSplat;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kArmS128Load64Splat;
break;
- case LoadTransformation::kI16x8Load8x8S:
- opcode = kArmI16x8Load8x8S;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kArmS128Load8x8S;
break;
- case LoadTransformation::kI16x8Load8x8U:
- opcode = kArmI16x8Load8x8U;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kArmS128Load8x8U;
break;
- case LoadTransformation::kI32x4Load16x4S:
- opcode = kArmI32x4Load16x4S;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kArmS128Load16x4S;
break;
- case LoadTransformation::kI32x4Load16x4U:
- opcode = kArmI32x4Load16x4U;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kArmS128Load16x4U;
break;
- case LoadTransformation::kI64x2Load32x2S:
- opcode = kArmI64x2Load32x2S;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kArmS128Load32x2S;
break;
- case LoadTransformation::kI64x2Load32x2U:
- opcode = kArmI64x2Load32x2U;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kArmS128Load32x2U;
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kArmS128Load32Zero;
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kArmS128Load64Zero;
break;
default:
UNIMPLEMENTED();
@@ -666,17 +673,17 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
ExternalReferenceMatcher m(base);
- if (m.HasValue() && CanAddressRelativeToRootsRegister(m.Value())) {
+ if (m.HasResolvedValue() &&
+ CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
Int32Matcher int_matcher(index);
- if (int_matcher.HasValue()) {
+ if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
- int_matcher.Value() +
+ int_matcher.ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
- isolate(), m.Value());
+ isolate(), m.ResolvedValue());
int input_count = 2;
InstructionOperand inputs[2];
inputs[0] = g.UseRegister(value);
@@ -898,16 +905,16 @@ void InstructionSelector::VisitWord32And(Node* node) {
return;
}
}
- if (m.right().HasValue()) {
- uint32_t const value = m.right().Value();
+ if (m.right().HasResolvedValue()) {
+ uint32_t const value = m.right().ResolvedValue();
uint32_t width = base::bits::CountPopulation(value);
uint32_t leading_zeros = base::bits::CountLeadingZeros32(value);
// Try to merge SHR operations on the left hand input into this AND.
if (m.left().IsWord32Shr()) {
Int32BinopMatcher mshr(m.left().node());
- if (mshr.right().HasValue()) {
- uint32_t const shift = mshr.right().Value();
+ if (mshr.right().HasResolvedValue()) {
+ uint32_t const shift = mshr.right().ResolvedValue();
if (((shift == 8) || (shift == 16) || (shift == 24)) &&
(value == 0xFF)) {
@@ -915,14 +922,14 @@ void InstructionSelector::VisitWord32And(Node* node) {
// bytewise rotation.
Emit(kArmUxtb, g.DefineAsRegister(m.node()),
g.UseRegister(mshr.left().node()),
- g.TempImmediate(mshr.right().Value()));
+ g.TempImmediate(mshr.right().ResolvedValue()));
return;
} else if (((shift == 8) || (shift == 16)) && (value == 0xFFFF)) {
// Merge SHR into AND by emitting a UXTH instruction with a
// bytewise rotation.
Emit(kArmUxth, g.DefineAsRegister(m.node()),
g.UseRegister(mshr.left().node()),
- g.TempImmediate(mshr.right().Value()));
+ g.TempImmediate(mshr.right().ResolvedValue()));
return;
} else if (IsSupported(ARMv7) && (width != 0) &&
((leading_zeros + width) == 32)) {
@@ -1074,11 +1081,11 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (IsSupported(ARMv7) && m.left().IsWord32And() &&
m.right().IsInRange(0, 31)) {
- uint32_t lsb = m.right().Value();
+ uint32_t lsb = m.right().ResolvedValue();
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
- uint32_t value = static_cast<uint32_t>(mleft.right().Value() >> lsb)
- << lsb;
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t value =
+ static_cast<uint32_t>(mleft.right().ResolvedValue() >> lsb) << lsb;
uint32_t width = base::bits::CountPopulation(value);
uint32_t msb = base::bits::CountLeadingZeros32(value);
if ((width != 0) && (msb + width + lsb == 32)) {
@@ -1095,9 +1102,9 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
Int32BinopMatcher m(node);
if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
Int32BinopMatcher mleft(m.left().node());
- if (m.right().HasValue() && mleft.right().HasValue()) {
- uint32_t sar = m.right().Value();
- uint32_t shl = mleft.right().Value();
+ if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
+ uint32_t sar = m.right().ResolvedValue();
+ uint32_t shl = mleft.right().ResolvedValue();
if ((sar == shl) && (sar == 16)) {
Emit(kArmSxth, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
@@ -1199,7 +1206,7 @@ void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
// no register aliasing of input registers with output registers.
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
shift_operand = g.UseImmediate(m.node());
} else {
shift_operand = g.UseUniqueRegister(m.node());
@@ -1420,8 +1427,8 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitInt32Mul(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
- if (m.right().HasValue() && m.right().Value() > 0) {
- int32_t value = m.right().Value();
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ int32_t value = m.right().ResolvedValue();
if (base::bits::IsPowerOfTwo(value - 1)) {
Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
@@ -2191,7 +2198,7 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode =
@@ -2217,7 +2224,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kWord32AtomicStoreWord8;
@@ -2247,7 +2254,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -2261,7 +2268,6 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_Offset_RR;
@@ -2283,7 +2289,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* index = node->InputAt(1);
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -2297,7 +2303,6 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_Offset_RR;
@@ -2322,7 +2327,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -2336,7 +2341,6 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
opcode = word32_op;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_Offset_RR;
@@ -2598,10 +2602,10 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I32x4GeU, kArmI32x4GeU) \
V(I16x8SConvertI32x4, kArmI16x8SConvertI32x4) \
V(I16x8Add, kArmI16x8Add) \
- V(I16x8AddSaturateS, kArmI16x8AddSaturateS) \
+ V(I16x8AddSatS, kArmI16x8AddSatS) \
V(I16x8AddHoriz, kArmI16x8AddHoriz) \
V(I16x8Sub, kArmI16x8Sub) \
- V(I16x8SubSaturateS, kArmI16x8SubSaturateS) \
+ V(I16x8SubSatS, kArmI16x8SubSatS) \
V(I16x8Mul, kArmI16x8Mul) \
V(I16x8MinS, kArmI16x8MinS) \
V(I16x8MaxS, kArmI16x8MaxS) \
@@ -2610,8 +2614,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8GtS, kArmI16x8GtS) \
V(I16x8GeS, kArmI16x8GeS) \
V(I16x8UConvertI32x4, kArmI16x8UConvertI32x4) \
- V(I16x8AddSaturateU, kArmI16x8AddSaturateU) \
- V(I16x8SubSaturateU, kArmI16x8SubSaturateU) \
+ V(I16x8AddSatU, kArmI16x8AddSatU) \
+ V(I16x8SubSatU, kArmI16x8SubSatU) \
V(I16x8MinU, kArmI16x8MinU) \
V(I16x8MaxU, kArmI16x8MaxU) \
V(I16x8GtU, kArmI16x8GtU) \
@@ -2619,9 +2623,9 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8RoundingAverageU, kArmI16x8RoundingAverageU) \
V(I8x16SConvertI16x8, kArmI8x16SConvertI16x8) \
V(I8x16Add, kArmI8x16Add) \
- V(I8x16AddSaturateS, kArmI8x16AddSaturateS) \
+ V(I8x16AddSatS, kArmI8x16AddSatS) \
V(I8x16Sub, kArmI8x16Sub) \
- V(I8x16SubSaturateS, kArmI8x16SubSaturateS) \
+ V(I8x16SubSatS, kArmI8x16SubSatS) \
V(I8x16Mul, kArmI8x16Mul) \
V(I8x16MinS, kArmI8x16MinS) \
V(I8x16MaxS, kArmI8x16MaxS) \
@@ -2630,8 +2634,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16GtS, kArmI8x16GtS) \
V(I8x16GeS, kArmI8x16GeS) \
V(I8x16UConvertI16x8, kArmI8x16UConvertI16x8) \
- V(I8x16AddSaturateU, kArmI8x16AddSaturateU) \
- V(I8x16SubSaturateU, kArmI8x16SubSaturateU) \
+ V(I8x16AddSatU, kArmI8x16AddSatU) \
+ V(I8x16SubSatU, kArmI8x16SubSatU) \
V(I8x16MinU, kArmI8x16MinU) \
V(I8x16MaxU, kArmI8x16MaxU) \
V(I8x16GtU, kArmI8x16GtU) \
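The instruction-selector changes above are largely the HasValue()/Value() to HasResolvedValue()/ResolvedValue() rename on the node matchers. A standalone sketch of the optional-constant pattern that API models (using std::optional; not the actual NodeMatcher implementation):

// Standalone sketch: a matcher that may or may not carry a resolved
// compile-time constant, mirroring the HasResolvedValue()/ResolvedValue()
// pair used throughout the instruction selector.
#include <cassert>
#include <cstdint>
#include <optional>

class Int32MatcherLike {
 public:
  explicit Int32MatcherLike(std::optional<int32_t> constant)
      : constant_(constant) {}

  bool HasResolvedValue() const { return constant_.has_value(); }
  int32_t ResolvedValue() const { return *constant_; }  // only if resolved

 private:
  std::optional<int32_t> constant_;
};

// Typical selector-style use: fold the operand into an immediate when it is a
// known constant, otherwise fall back to a register operand.
bool CanBeImmediate(const Int32MatcherLike& m) {
  if (!m.HasResolvedValue()) return false;
  int32_t value = m.ResolvedValue();
  return value >= -4095 && value <= 4095;  // assumed ARM-style offset range
}

int main() {
  assert(CanBeImmediate(Int32MatcherLike(42)));
  assert(!CanBeImmediate(Int32MatcherLike(std::nullopt)));
  return 0;
}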
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 6524502408..02809942a1 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -418,6 +418,18 @@ void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
}
}
+// Handles unary ops that work for float (scalar), double (scalar), or NEON.
+template <typename Fn>
+void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
+ Arm64OperandConverter i, VectorFormat scalar,
+ VectorFormat vector) {
+ VectorFormat f = instr->InputAt(0)->IsSimd128Register() ? vector : scalar;
+
+ VRegister output = VRegister::Create(i.OutputDoubleRegister().code(), f);
+ VRegister input = VRegister::Create(i.InputDoubleRegister(0).code(), f);
+ (tasm->*fn)(output, input);
+}
+
} // namespace
#define ASSEMBLE_SHIFT(asm_instr, width) \
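The EmitFpOrNeonUnop helper added in the hunk above dispatches through a pointer to a TurboAssembler member, picking the scalar or vector register format from the instruction's operand type. A standalone sketch of the pointer-to-member dispatch (toy assembler class, not TurboAssembler):

// Standalone sketch of the (tasm->*fn)(...) dispatch used by EmitFpOrNeonUnop:
// one helper calls whichever assembler method it is handed.
#include <iostream>
#include <string>

class AssemblerLike {
 public:
  void Frintm(const std::string& dst, const std::string& src) {
    std::cout << "frintm " << dst << ", " << src << "\n";
  }
  void Frintp(const std::string& dst, const std::string& src) {
    std::cout << "frintp " << dst << ", " << src << "\n";
  }
};

// Fn is a pointer to a member of AssemblerLike; (tasm->*fn)(...) performs the
// call, just as EmitFpOrNeonUnop does with TurboAssembler members.
template <typename Fn>
void EmitUnop(AssemblerLike* tasm, Fn fn, const std::string& dst,
              const std::string& src) {
  (tasm->*fn)(dst, src);
}

int main() {
  AssemblerLike a;
  EmitUnop(&a, &AssemblerLike::Frintm, "d0", "d1");
  EmitUnop(&a, &AssemblerLike::Frintp, "v0.4s", "v1.4s");
  return 0;
}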
@@ -679,7 +691,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ CallCodeObject(reg);
}
@@ -720,7 +732,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ JumpCodeObject(reg);
}
@@ -750,7 +762,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
UseScratchRegisterScope temps(tasm());
temps.Exclude(x17);
@@ -1030,31 +1042,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(tanh);
break;
case kArm64Float32RoundDown:
- __ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatS,
+ kFormat4S);
break;
case kArm64Float64RoundDown:
- __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatD,
+ kFormat2D);
break;
case kArm64Float32RoundUp:
- __ Frintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatS,
+ kFormat4S);
break;
case kArm64Float64RoundUp:
- __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatD,
+ kFormat2D);
break;
case kArm64Float64RoundTiesAway:
- __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frinta, instr, i, kFormatD,
+ kFormat2D);
break;
case kArm64Float32RoundTruncate:
- __ Frintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatS,
+ kFormat4S);
break;
case kArm64Float64RoundTruncate:
- __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatD,
+ kFormat2D);
break;
case kArm64Float32RoundTiesEven:
- __ Frintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatS,
+ kFormat4S);
break;
case kArm64Float64RoundTiesEven:
- __ Frintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatD,
+ kFormat2D);
break;
case kArm64Add:
if (FlagsModeField::decode(opcode) != kFlags_none) {
@@ -1118,12 +1139,64 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Mul32:
__ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
break;
- case kArm64Smull:
- __ Smull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
+ case kArm64Saddlp: {
+ VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ __ Saddlp(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f));
break;
- case kArm64Umull:
- __ Umull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
+ }
+ case kArm64Uaddlp: {
+ VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ __ Uaddlp(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f));
+ break;
+ }
+ case kArm64Smull: {
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Smull(i.OutputRegister(), i.InputRegister32(0),
+ i.InputRegister32(1));
+ } else {
+ DCHECK(instr->InputAt(0)->IsSimd128Register());
+ VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidth(dst_f);
+ __ Smull(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f),
+ i.InputSimd128Register(1).Format(src_f));
+ }
+ break;
+ }
+ case kArm64Smull2: {
+ VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ __ Smull2(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f),
+ i.InputSimd128Register(1).Format(src_f));
break;
+ }
+ case kArm64Umull: {
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Umull(i.OutputRegister(), i.InputRegister32(0),
+ i.InputRegister32(1));
+ } else {
+ DCHECK(instr->InputAt(0)->IsSimd128Register());
+ VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidth(dst_f);
+ __ Umull(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f),
+ i.InputSimd128Register(1).Format(src_f));
+ }
+ break;
+ }
+ case kArm64Umull2: {
+ VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ __ Umull2(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f),
+ i.InputSimd128Register(1).Format(src_f));
+ break;
+ }
case kArm64Madd:
__ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputRegister(2));
@@ -1399,6 +1472,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Cmn32:
__ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
+ case kArm64Cnt: {
+ VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
+ __ Cnt(i.OutputSimd128Register().Format(f),
+ i.InputSimd128Register(0).Format(f));
+ break;
+ }
case kArm64Tst:
__ Tst(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
break;
@@ -1852,11 +1931,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Instr(i.OutputSimd128Register().V##FORMAT(), \
i.InputSimd128Register(0).V##FORMAT()); \
break;
-#define SIMD_WIDENING_UNOP_CASE(Op, Instr, WIDE, NARROW) \
- case Op: \
- __ Instr(i.OutputSimd128Register().V##WIDE(), \
- i.InputSimd128Register(0).V##NARROW()); \
- break;
#define SIMD_BINOP_CASE(Op, Instr, FORMAT) \
case Op: \
__ Instr(i.OutputSimd128Register().V##FORMAT(), \
@@ -1872,6 +1946,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; \
}
+ case kArm64Sxtl: {
+ VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat narrow = VectorFormatHalfWidth(wide);
+ __ Sxtl(i.OutputSimd128Register().Format(wide),
+ i.InputSimd128Register(0).Format(narrow));
+ break;
+ }
+ case kArm64Sxtl2: {
+ VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat narrow = VectorFormatHalfWidthDoubleLanes(wide);
+ __ Sxtl2(i.OutputSimd128Register().Format(wide),
+ i.InputSimd128Register(0).Format(narrow));
+ break;
+ }
+ case kArm64Uxtl: {
+ VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat narrow = VectorFormatHalfWidth(wide);
+ __ Uxtl(i.OutputSimd128Register().Format(wide),
+ i.InputSimd128Register(0).Format(narrow));
+ break;
+ }
+ case kArm64Uxtl2: {
+ VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat narrow = VectorFormatHalfWidthDoubleLanes(wide);
+ __ Uxtl2(i.OutputSimd128Register().Format(wide),
+ i.InputSimd128Register(0).Format(narrow));
+ break;
+ }
case kArm64F64x2Splat: {
__ Dup(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).D(), 0);
break;
@@ -1940,22 +2042,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
break;
}
- case kArm64F64x2RoundUp:
- __ Frintp(i.OutputSimd128Register().V2D(),
- i.InputSimd128Register(0).V2D());
- break;
- case kArm64F64x2RoundDown:
- __ Frintm(i.OutputSimd128Register().V2D(),
- i.InputSimd128Register(0).V2D());
- break;
- case kArm64F64x2RoundTruncate:
- __ Frintz(i.OutputSimd128Register().V2D(),
- i.InputSimd128Register(0).V2D());
- break;
- case kArm64F64x2RoundTiesEven:
- __ Frintn(i.OutputSimd128Register().V2D(),
- i.InputSimd128Register(0).V2D());
- break;
case kArm64F32x4Splat: {
__ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
break;
@@ -2029,22 +2115,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
break;
}
- case kArm64F32x4RoundUp:
- __ Frintp(i.OutputSimd128Register().V4S(),
- i.InputSimd128Register(0).V4S());
- break;
- case kArm64F32x4RoundDown:
- __ Frintm(i.OutputSimd128Register().V4S(),
- i.InputSimd128Register(0).V4S());
- break;
- case kArm64F32x4RoundTruncate:
- __ Frintz(i.OutputSimd128Register().V4S(),
- i.InputSimd128Register(0).V4S());
- break;
- case kArm64F32x4RoundTiesEven:
- __ Frintn(i.OutputSimd128Register().V4S(),
- i.InputSimd128Register(0).V4S());
- break;
case kArm64I64x2Splat: {
__ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0));
break;
@@ -2134,21 +2204,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
SIMD_BINOP_CASE(kArm64I64x2Eq, Cmeq, 2D);
- case kArm64I64x2Ne: {
- VRegister dst = i.OutputSimd128Register().V2D();
- __ Cmeq(dst, i.InputSimd128Register(0).V2D(),
- i.InputSimd128Register(1).V2D());
- __ Mvn(dst, dst);
- break;
- }
- SIMD_BINOP_CASE(kArm64I64x2GtS, Cmgt, 2D);
- SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D);
case kArm64I64x2ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 6, V2D, Ushl, X);
break;
}
- SIMD_BINOP_CASE(kArm64I64x2GtU, Cmhi, 2D);
- SIMD_BINOP_CASE(kArm64I64x2GeU, Cmhs, 2D);
case kArm64I32x4Splat: {
__ Dup(i.OutputSimd128Register().V4S(), i.InputRegister32(0));
break;
@@ -2168,8 +2227,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
SIMD_UNOP_CASE(kArm64I32x4SConvertF32x4, Fcvtzs, 4S);
- SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8Low, Sxtl, 4S, 4H);
- SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8High, Sxtl2, 4S, 8H);
SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S);
case kArm64I32x4Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 5, V4S, Sshl, W);
@@ -2198,8 +2255,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_CASE(kArm64I32x4GtS, Cmgt, 4S);
SIMD_BINOP_CASE(kArm64I32x4GeS, Cmge, 4S);
SIMD_UNOP_CASE(kArm64I32x4UConvertF32x4, Fcvtzu, 4S);
- SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8Low, Uxtl, 4S, 4H);
- SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8High, Uxtl2, 4S, 8H);
case kArm64I32x4ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 5, V4S, Ushl, W);
break;
@@ -2258,8 +2313,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
break;
}
- SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16Low, Sxtl, 8H, 8B);
- SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16High, Sxtl2, 8H, 16B);
SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H);
case kArm64I16x8Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 4, V8H, Sshl, W);
@@ -2284,10 +2337,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
SIMD_BINOP_CASE(kArm64I16x8Add, Add, 8H);
- SIMD_BINOP_CASE(kArm64I16x8AddSaturateS, Sqadd, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8AddSatS, Sqadd, 8H);
SIMD_BINOP_CASE(kArm64I16x8AddHoriz, Addp, 8H);
SIMD_BINOP_CASE(kArm64I16x8Sub, Sub, 8H);
- SIMD_BINOP_CASE(kArm64I16x8SubSaturateS, Sqsub, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8SubSatS, Sqsub, 8H);
SIMD_BINOP_CASE(kArm64I16x8Mul, Mul, 8H);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I16x8Mla, Mla, 8H);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I16x8Mls, Mls, 8H);
@@ -2303,15 +2356,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SIMD_BINOP_CASE(kArm64I16x8GtS, Cmgt, 8H);
SIMD_BINOP_CASE(kArm64I16x8GeS, Cmge, 8H);
- case kArm64I16x8UConvertI8x16Low: {
- __ Uxtl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8B());
- break;
- }
- case kArm64I16x8UConvertI8x16High: {
- __ Uxtl2(i.OutputSimd128Register().V8H(),
- i.InputSimd128Register(0).V16B());
- break;
- }
case kArm64I16x8ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 4, V8H, Ushl, W);
break;
@@ -2330,13 +2374,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtun2(dst.V8H(), src1.V4S());
break;
}
- SIMD_BINOP_CASE(kArm64I16x8AddSaturateU, Uqadd, 8H);
- SIMD_BINOP_CASE(kArm64I16x8SubSaturateU, Uqsub, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8AddSatU, Uqadd, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8SubSatU, Uqsub, 8H);
SIMD_BINOP_CASE(kArm64I16x8MinU, Umin, 8H);
SIMD_BINOP_CASE(kArm64I16x8MaxU, Umax, 8H);
SIMD_BINOP_CASE(kArm64I16x8GtU, Cmhi, 8H);
SIMD_BINOP_CASE(kArm64I16x8GeU, Cmhs, 8H);
SIMD_BINOP_CASE(kArm64I16x8RoundingAverageU, Urhadd, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8Q15MulRSatS, Sqrdmulh, 8H);
SIMD_UNOP_CASE(kArm64I16x8Abs, Abs, 8H);
case kArm64I16x8BitMask: {
Register dst = i.OutputRegister32();
@@ -2400,9 +2445,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
SIMD_BINOP_CASE(kArm64I8x16Add, Add, 16B);
- SIMD_BINOP_CASE(kArm64I8x16AddSaturateS, Sqadd, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16AddSatS, Sqadd, 16B);
SIMD_BINOP_CASE(kArm64I8x16Sub, Sub, 16B);
- SIMD_BINOP_CASE(kArm64I8x16SubSaturateS, Sqsub, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16SubSatS, Sqsub, 16B);
SIMD_BINOP_CASE(kArm64I8x16Mul, Mul, 16B);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mla, Mla, 16B);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mls, Mls, 16B);
@@ -2436,8 +2481,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtun2(dst.V16B(), src1.V8H());
break;
}
- SIMD_BINOP_CASE(kArm64I8x16AddSaturateU, Uqadd, 16B);
- SIMD_BINOP_CASE(kArm64I8x16SubSaturateU, Uqsub, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16AddSatU, Uqadd, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16SubSatU, Uqsub, 16B);
SIMD_BINOP_CASE(kArm64I8x16MinU, Umin, 16B);
SIMD_BINOP_CASE(kArm64I8x16MaxU, Umax, 16B);
SIMD_BINOP_CASE(kArm64I8x16GtU, Cmhi, 16B);
@@ -2562,17 +2607,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src1 = i.InputSimd128Register(1).V16B();
// Unary shuffle table is in src0, binary shuffle table is in src0, src1,
// which must be consecutive.
- uint32_t mask = 0;
- if (src0 == src1) {
- mask = 0x0F0F0F0F;
- } else {
- mask = 0x1F1F1F1F;
+ if (src0 != src1) {
DCHECK(AreConsecutive(src0, src1));
}
- int64_t imm1 =
- make_uint64(i.InputInt32(3) & mask, i.InputInt32(2) & mask);
- int64_t imm2 =
- make_uint64(i.InputInt32(5) & mask, i.InputInt32(4) & mask);
+
+ int64_t imm1 = make_uint64(i.InputInt32(3), i.InputInt32(2));
+ int64_t imm2 = make_uint64(i.InputInt32(5), i.InputInt32(4));
+ DCHECK_EQ(0, (imm1 | imm2) & (src0 == src1 ? 0xF0F0F0F0F0F0F0F0
+ : 0xE0E0E0E0E0E0E0E0));
+
UseScratchRegisterScope scope(tasm());
VRegister temp = scope.AcquireV(kFormat16B);
__ Movi(temp, imm2, imm1);
@@ -2590,57 +2633,46 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64S8x8Reverse, Rev64, 16B);
SIMD_UNOP_CASE(kArm64S8x4Reverse, Rev32, 16B);
SIMD_UNOP_CASE(kArm64S8x2Reverse, Rev16, 16B);
- case kArm64V64x2AllTrue: {
- UseScratchRegisterScope scope(tasm());
- VRegister temp1 = scope.AcquireV(kFormat2D);
- VRegister temp2 = scope.AcquireV(kFormatS);
-
- __ Cmeq(temp1, i.InputSimd128Register(0).V2D(), 0);
- __ Umaxv(temp2, temp1.V4S());
- __ Umov(i.OutputRegister32(), temp2, 0);
- __ Add(i.OutputRegister32(), i.OutputRegister32(), 1);
- break;
- }
case kArm64LoadSplat: {
VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
__ ld1r(i.OutputSimd128Register().Format(f), i.MemoryOperand(0));
break;
}
- case kArm64I16x8Load8x8S: {
+ case kArm64S128Load8x8S: {
__ Ldr(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V8H(), i.OutputSimd128Register().V8B());
break;
}
- case kArm64I16x8Load8x8U: {
+ case kArm64S128Load8x8U: {
__ Ldr(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V8H(), i.OutputSimd128Register().V8B());
break;
}
- case kArm64I32x4Load16x4S: {
+ case kArm64S128Load16x4S: {
__ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());
break;
}
- case kArm64I32x4Load16x4U: {
+ case kArm64S128Load16x4U: {
__ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());
break;
}
- case kArm64I64x2Load32x2S: {
+ case kArm64S128Load32x2S: {
__ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
break;
}
- case kArm64I64x2Load32x2U: {
+ case kArm64S128Load32x2U: {
__ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
break;
}
- case kArm64S128LoadMem32Zero: {
+ case kArm64S128Load32Zero: {
__ Ldr(i.OutputSimd128Register().S(), i.MemoryOperand(0));
break;
}
- case kArm64S128LoadMem64Zero: {
+ case kArm64S128Load64Zero: {
__ Ldr(i.OutputSimd128Register().D(), i.MemoryOperand(0));
break;
}
@@ -2664,7 +2696,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} // NOLINT(readability/fn_size)
#undef SIMD_UNOP_CASE
-#undef SIMD_WIDENING_UNOP_CASE
#undef SIMD_BINOP_CASE
#undef SIMD_DESTRUCTIVE_BINOP_CASE
#undef SIMD_REDUCE_OP_CASE
@@ -3062,11 +3093,10 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
-
if (returns != 0) {
__ Drop(returns);
}
@@ -3083,48 +3113,113 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
unwinding_info_writer_.MarkBlockWillExit();
+ // We might need x3 for scratch.
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & x3.bit());
+ const int parameter_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
Arm64OperandConverter g(this, nullptr);
- int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
+
+  // {additional_pop_count} is only greater than zero if {parameter_count == 0}.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_count != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (__ emit_debug_code()) {
+ __ cmp(g.ToRegister(additional_pop_count), Operand(0));
+ __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue);
+ }
+ }
+
+ Register argc_reg = x3;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // Functions with JS linkage have at least one parameter (the receiver).
+ // If {parameter_count} == 0, it means it is a builtin with
+  // kDontAdaptArgumentsSentinel, which takes care of popping the JS
+  // arguments itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_count != 0;
+#else
+ const bool drop_jsargs = false;
+#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now unless they have a variable
     // number of stack slot pops.
- if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
if (return_label_.is_bound()) {
__ B(&return_label_);
return;
} else {
__ Bind(&return_label_);
- AssembleDeconstructFrame();
}
- } else {
- AssembleDeconstructFrame();
}
+ if (drop_jsargs) {
+ // Get the actual argument count.
+ __ Ldr(argc_reg, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
}
- if (pop->IsImmediate()) {
- pop_count += g.ToConstant(pop).ToInt32();
- __ DropArguments(pop_count);
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ Label argc_reg_has_final_count;
+ __ Add(argc_reg, argc_reg, 1); // Consider the receiver.
+ if (parameter_count > 1) {
+ __ Cmp(argc_reg, Operand(parameter_count));
+ __ B(&argc_reg_has_final_count, ge);
+ __ Mov(argc_reg, Operand(parameter_count));
+ __ Bind(&argc_reg_has_final_count);
+ }
+ __ DropArguments(argc_reg);
+ } else if (additional_pop_count->IsImmediate()) {
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ __ DropArguments(parameter_count + additional_count);
+ } else if (parameter_count == 0) {
+ __ DropArguments(g.ToRegister(additional_pop_count));
} else {
- Register pop_reg = g.ToRegister(pop);
- __ Add(pop_reg, pop_reg, pop_count);
- __ DropArguments(pop_reg);
+ // {additional_pop_count} is guaranteed to be zero if {parameter_count !=
+ // 0}. Check RawMachineAssembler::PopAndReturn.
+ __ DropArguments(parameter_count);
}
-
__ AssertSpAligned();
__ Ret();
}
void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {
__ ForceConstantPoolEmissionWithoutJump();
// We are conservative here, assuming all deopts are lazy deopts.
DCHECK_GE(Deoptimizer::kLazyDeoptExitSize,
Deoptimizer::kNonLazyDeoptExitSize);
- __ CheckVeneerPool(false, false,
- deopt_count * Deoptimizer::kLazyDeoptExitSize);
+ __ CheckVeneerPool(
+ false, false,
+ static_cast<int>(exits->size()) * Deoptimizer::kLazyDeoptExitSize);
+
+ // Check which deopt kinds exist in this Code object, to avoid emitting jumps
+ // to unused entries.
+ bool saw_deopt_kind[kDeoptimizeKindCount] = {false};
+ for (auto exit : *exits) {
+ saw_deopt_kind[static_cast<int>(exit->kind())] = true;
+ }
+
+ // Emit the jumps to deoptimization entries.
+ UseScratchRegisterScope scope(tasm());
+ Register scratch = scope.AcquireX();
+ STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
+ for (int i = 0; i < kDeoptimizeKindCount; i++) {
+ if (!saw_deopt_kind[i]) continue;
+ __ bind(&jump_deoptimization_entry_labels_[i]);
+ __ LoadEntryFromBuiltinIndex(Deoptimizer::GetDeoptimizationEntry(
+ isolate(), static_cast<DeoptimizeKind>(i)),
+ scratch);
+ __ Jump(scratch);
+ }
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
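
The drop_jsargs path above leaves max(1 + argc, parameter_count) in argc_reg
before DropArguments; the Cmp/Mov pair is skipped for parameter_count <= 1
because argc + 1 already covers that case. A minimal C++ sketch of the value
being computed, with a hypothetical helper name (illustration only, not part
of the patch):

    #include <algorithm>

    // Hypothetical helper: the slot count produced by the emitted
    // Add/Cmp/B.ge/Mov sequence when dropping JS arguments.
    static int SlotsToDrop(int argc, int parameter_count) {
      int with_receiver = argc + 1;                     // always pop the receiver
      return std::max(with_receiver, parameter_count);  // max(1 + argc, parameter_count)
    }
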
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 7f84a3504b..c80538f3a9 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -24,6 +24,7 @@ namespace compiler {
V(Arm64Cmp32) \
V(Arm64Cmn) \
V(Arm64Cmn32) \
+ V(Arm64Cnt) \
V(Arm64Tst) \
V(Arm64Tst32) \
V(Arm64Or) \
@@ -34,12 +35,16 @@ namespace compiler {
V(Arm64Eor32) \
V(Arm64Eon) \
V(Arm64Eon32) \
+ V(Arm64Saddlp) \
V(Arm64Sub) \
V(Arm64Sub32) \
V(Arm64Mul) \
V(Arm64Mul32) \
V(Arm64Smull) \
+ V(Arm64Smull2) \
+ V(Arm64Uaddlp) \
V(Arm64Umull) \
+ V(Arm64Umull2) \
V(Arm64Madd) \
V(Arm64Madd32) \
V(Arm64Msub) \
@@ -168,6 +173,10 @@ namespace compiler {
V(Arm64StrCompressTagged) \
V(Arm64DmbIsh) \
V(Arm64DsbIsb) \
+ V(Arm64Sxtl) \
+ V(Arm64Sxtl2) \
+ V(Arm64Uxtl) \
+ V(Arm64Uxtl2) \
V(Arm64F64x2Splat) \
V(Arm64F64x2ExtractLane) \
V(Arm64F64x2ReplaceLane) \
@@ -188,10 +197,6 @@ namespace compiler {
V(Arm64F64x2Qfms) \
V(Arm64F64x2Pmin) \
V(Arm64F64x2Pmax) \
- V(Arm64F64x2RoundUp) \
- V(Arm64F64x2RoundDown) \
- V(Arm64F64x2RoundTruncate) \
- V(Arm64F64x2RoundTiesEven) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
V(Arm64F32x4ReplaceLane) \
@@ -217,10 +222,6 @@ namespace compiler {
V(Arm64F32x4Qfms) \
V(Arm64F32x4Pmin) \
V(Arm64F32x4Pmax) \
- V(Arm64F32x4RoundUp) \
- V(Arm64F32x4RoundDown) \
- V(Arm64F32x4RoundTruncate) \
- V(Arm64F32x4RoundTiesEven) \
V(Arm64I64x2Splat) \
V(Arm64I64x2ExtractLane) \
V(Arm64I64x2ReplaceLane) \
@@ -231,18 +232,11 @@ namespace compiler {
V(Arm64I64x2Sub) \
V(Arm64I64x2Mul) \
V(Arm64I64x2Eq) \
- V(Arm64I64x2Ne) \
- V(Arm64I64x2GtS) \
- V(Arm64I64x2GeS) \
V(Arm64I64x2ShrU) \
- V(Arm64I64x2GtU) \
- V(Arm64I64x2GeU) \
V(Arm64I32x4Splat) \
V(Arm64I32x4ExtractLane) \
V(Arm64I32x4ReplaceLane) \
V(Arm64I32x4SConvertF32x4) \
- V(Arm64I32x4SConvertI16x8Low) \
- V(Arm64I32x4SConvertI16x8High) \
V(Arm64I32x4Neg) \
V(Arm64I32x4Shl) \
V(Arm64I32x4ShrS) \
@@ -259,8 +253,6 @@ namespace compiler {
V(Arm64I32x4GtS) \
V(Arm64I32x4GeS) \
V(Arm64I32x4UConvertF32x4) \
- V(Arm64I32x4UConvertI16x8Low) \
- V(Arm64I32x4UConvertI16x8High) \
V(Arm64I32x4ShrU) \
V(Arm64I32x4MinU) \
V(Arm64I32x4MaxU) \
@@ -273,17 +265,15 @@ namespace compiler {
V(Arm64I16x8ExtractLaneU) \
V(Arm64I16x8ExtractLaneS) \
V(Arm64I16x8ReplaceLane) \
- V(Arm64I16x8SConvertI8x16Low) \
- V(Arm64I16x8SConvertI8x16High) \
V(Arm64I16x8Neg) \
V(Arm64I16x8Shl) \
V(Arm64I16x8ShrS) \
V(Arm64I16x8SConvertI32x4) \
V(Arm64I16x8Add) \
- V(Arm64I16x8AddSaturateS) \
+ V(Arm64I16x8AddSatS) \
V(Arm64I16x8AddHoriz) \
V(Arm64I16x8Sub) \
- V(Arm64I16x8SubSaturateS) \
+ V(Arm64I16x8SubSatS) \
V(Arm64I16x8Mul) \
V(Arm64I16x8Mla) \
V(Arm64I16x8Mls) \
@@ -293,17 +283,16 @@ namespace compiler {
V(Arm64I16x8Ne) \
V(Arm64I16x8GtS) \
V(Arm64I16x8GeS) \
- V(Arm64I16x8UConvertI8x16Low) \
- V(Arm64I16x8UConvertI8x16High) \
V(Arm64I16x8ShrU) \
V(Arm64I16x8UConvertI32x4) \
- V(Arm64I16x8AddSaturateU) \
- V(Arm64I16x8SubSaturateU) \
+ V(Arm64I16x8AddSatU) \
+ V(Arm64I16x8SubSatU) \
V(Arm64I16x8MinU) \
V(Arm64I16x8MaxU) \
V(Arm64I16x8GtU) \
V(Arm64I16x8GeU) \
V(Arm64I16x8RoundingAverageU) \
+ V(Arm64I16x8Q15MulRSatS) \
V(Arm64I16x8Abs) \
V(Arm64I16x8BitMask) \
V(Arm64I8x16Splat) \
@@ -315,9 +304,9 @@ namespace compiler {
V(Arm64I8x16ShrS) \
V(Arm64I8x16SConvertI16x8) \
V(Arm64I8x16Add) \
- V(Arm64I8x16AddSaturateS) \
+ V(Arm64I8x16AddSatS) \
V(Arm64I8x16Sub) \
- V(Arm64I8x16SubSaturateS) \
+ V(Arm64I8x16SubSatS) \
V(Arm64I8x16Mul) \
V(Arm64I8x16Mla) \
V(Arm64I8x16Mls) \
@@ -329,8 +318,8 @@ namespace compiler {
V(Arm64I8x16GeS) \
V(Arm64I8x16ShrU) \
V(Arm64I8x16UConvertI16x8) \
- V(Arm64I8x16AddSaturateU) \
- V(Arm64I8x16SubSaturateU) \
+ V(Arm64I8x16AddSatU) \
+ V(Arm64I8x16SubSatU) \
V(Arm64I8x16MinU) \
V(Arm64I8x16MaxU) \
V(Arm64I8x16GtU) \
@@ -376,17 +365,18 @@ namespace compiler {
V(Arm64S8x4Reverse) \
V(Arm64S8x2Reverse) \
V(Arm64V128AnyTrue) \
- V(Arm64V64x2AllTrue) \
V(Arm64V32x4AllTrue) \
V(Arm64V16x8AllTrue) \
V(Arm64V8x16AllTrue) \
V(Arm64LoadSplat) \
- V(Arm64I16x8Load8x8S) \
- V(Arm64I16x8Load8x8U) \
- V(Arm64I32x4Load16x4S) \
- V(Arm64I32x4Load16x4U) \
- V(Arm64I64x2Load32x2S) \
- V(Arm64I64x2Load32x2U) \
+ V(Arm64S128Load8x8S) \
+ V(Arm64S128Load8x8U) \
+ V(Arm64S128Load16x4S) \
+ V(Arm64S128Load16x4U) \
+ V(Arm64S128Load32x2S) \
+ V(Arm64S128Load32x2U) \
+ V(Arm64S128Load32Zero) \
+ V(Arm64S128Load64Zero) \
V(Arm64Word64AtomicLoadUint8) \
V(Arm64Word64AtomicLoadUint16) \
V(Arm64Word64AtomicLoadUint32) \
@@ -422,11 +412,7 @@ namespace compiler {
V(Arm64Word64AtomicCompareExchangeUint8) \
V(Arm64Word64AtomicCompareExchangeUint16) \
V(Arm64Word64AtomicCompareExchangeUint32) \
- V(Arm64Word64AtomicCompareExchangeUint64) \
- V(Arm64S128LoadMem32Zero) \
- V(Arm64S128LoadMem64Zero)
-// TODO(v8:10930) Adding new codes before these atomic instructions causes a
-// mksnapshot error.
+ V(Arm64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 6c572d2a1c..9d53074042 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -25,6 +25,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Cmp32:
case kArm64Cmn:
case kArm64Cmn32:
+ case kArm64Cnt:
case kArm64Tst:
case kArm64Tst32:
case kArm64Or:
@@ -35,12 +36,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Eor32:
case kArm64Eon:
case kArm64Eon32:
+ case kArm64Saddlp:
case kArm64Sub:
case kArm64Sub32:
case kArm64Mul:
case kArm64Mul32:
case kArm64Smull:
+ case kArm64Smull2:
+ case kArm64Uaddlp:
case kArm64Umull:
+ case kArm64Umull2:
case kArm64Madd:
case kArm64Madd32:
case kArm64Msub:
@@ -158,10 +163,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2Qfms:
case kArm64F64x2Pmin:
case kArm64F64x2Pmax:
- case kArm64F64x2RoundUp:
- case kArm64F64x2RoundDown:
- case kArm64F64x2RoundTruncate:
- case kArm64F64x2RoundTiesEven:
case kArm64F32x4Splat:
case kArm64F32x4ExtractLane:
case kArm64F32x4ReplaceLane:
@@ -187,10 +188,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F32x4Qfms:
case kArm64F32x4Pmin:
case kArm64F32x4Pmax:
- case kArm64F32x4RoundUp:
- case kArm64F32x4RoundDown:
- case kArm64F32x4RoundTruncate:
- case kArm64F32x4RoundTiesEven:
case kArm64I64x2Splat:
case kArm64I64x2ExtractLane:
case kArm64I64x2ReplaceLane:
@@ -201,18 +198,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I64x2Sub:
case kArm64I64x2Mul:
case kArm64I64x2Eq:
- case kArm64I64x2Ne:
- case kArm64I64x2GtS:
- case kArm64I64x2GeS:
case kArm64I64x2ShrU:
- case kArm64I64x2GtU:
- case kArm64I64x2GeU:
case kArm64I32x4Splat:
case kArm64I32x4ExtractLane:
case kArm64I32x4ReplaceLane:
case kArm64I32x4SConvertF32x4:
- case kArm64I32x4SConvertI16x8Low:
- case kArm64I32x4SConvertI16x8High:
+ case kArm64Sxtl:
+ case kArm64Sxtl2:
+ case kArm64Uxtl:
+ case kArm64Uxtl2:
case kArm64I32x4Neg:
case kArm64I32x4Shl:
case kArm64I32x4ShrS:
@@ -229,8 +223,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I32x4GtS:
case kArm64I32x4GeS:
case kArm64I32x4UConvertF32x4:
- case kArm64I32x4UConvertI16x8Low:
- case kArm64I32x4UConvertI16x8High:
case kArm64I32x4ShrU:
case kArm64I32x4MinU:
case kArm64I32x4MaxU:
@@ -243,17 +235,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I16x8ExtractLaneU:
case kArm64I16x8ExtractLaneS:
case kArm64I16x8ReplaceLane:
- case kArm64I16x8SConvertI8x16Low:
- case kArm64I16x8SConvertI8x16High:
case kArm64I16x8Neg:
case kArm64I16x8Shl:
case kArm64I16x8ShrS:
case kArm64I16x8SConvertI32x4:
case kArm64I16x8Add:
- case kArm64I16x8AddSaturateS:
+ case kArm64I16x8AddSatS:
case kArm64I16x8AddHoriz:
case kArm64I16x8Sub:
- case kArm64I16x8SubSaturateS:
+ case kArm64I16x8SubSatS:
case kArm64I16x8Mul:
case kArm64I16x8Mla:
case kArm64I16x8Mls:
@@ -263,17 +253,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I16x8Ne:
case kArm64I16x8GtS:
case kArm64I16x8GeS:
- case kArm64I16x8UConvertI8x16Low:
- case kArm64I16x8UConvertI8x16High:
case kArm64I16x8ShrU:
case kArm64I16x8UConvertI32x4:
- case kArm64I16x8AddSaturateU:
- case kArm64I16x8SubSaturateU:
+ case kArm64I16x8AddSatU:
+ case kArm64I16x8SubSatU:
case kArm64I16x8MinU:
case kArm64I16x8MaxU:
case kArm64I16x8GtU:
case kArm64I16x8GeU:
case kArm64I16x8RoundingAverageU:
+ case kArm64I16x8Q15MulRSatS:
case kArm64I16x8Abs:
case kArm64I16x8BitMask:
case kArm64I8x16Splat:
@@ -285,9 +274,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I8x16ShrS:
case kArm64I8x16SConvertI16x8:
case kArm64I8x16Add:
- case kArm64I8x16AddSaturateS:
+ case kArm64I8x16AddSatS:
case kArm64I8x16Sub:
- case kArm64I8x16SubSaturateS:
+ case kArm64I8x16SubSatS:
case kArm64I8x16Mul:
case kArm64I8x16Mla:
case kArm64I8x16Mls:
@@ -298,8 +287,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I8x16GtS:
case kArm64I8x16GeS:
case kArm64I8x16UConvertI16x8:
- case kArm64I8x16AddSaturateU:
- case kArm64I8x16SubSaturateU:
+ case kArm64I8x16AddSatU:
+ case kArm64I8x16SubSatU:
case kArm64I8x16ShrU:
case kArm64I8x16MinU:
case kArm64I8x16MaxU:
@@ -346,7 +335,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S8x4Reverse:
case kArm64S8x2Reverse:
case kArm64V128AnyTrue:
- case kArm64V64x2AllTrue:
case kArm64V32x4AllTrue:
case kArm64V16x8AllTrue:
case kArm64V8x16AllTrue:
@@ -371,14 +359,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64LdrDecompressAnyTagged:
case kArm64Peek:
case kArm64LoadSplat:
- case kArm64I16x8Load8x8S:
- case kArm64I16x8Load8x8U:
- case kArm64I32x4Load16x4S:
- case kArm64I32x4Load16x4U:
- case kArm64I64x2Load32x2S:
- case kArm64I64x2Load32x2U:
- case kArm64S128LoadMem32Zero:
- case kArm64S128LoadMem64Zero:
+ case kArm64S128Load8x8S:
+ case kArm64S128Load8x8U:
+ case kArm64S128Load16x4S:
+ case kArm64S128Load16x4U:
+ case kArm64S128Load32x2S:
+ case kArm64S128Load32x2U:
+ case kArm64S128Load32Zero:
+ case kArm64S128Load64Zero:
return kIsLoadOperation;
case kArm64Claim:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index fac7f9c1d1..584cfb6184 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -144,6 +144,13 @@ void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void VisitRR(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
+ Arm64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Arm64OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -151,6 +158,14 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseRegister(node->InputAt(1)));
}
+void VisitRRR(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
+ Arm64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node, int width) {
Arm64OperandGenerator g(selector);
@@ -311,7 +326,7 @@ bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
if (nm.IsWord32And()) {
Int32BinopMatcher mright(right_node);
if (mright.right().Is(0xFF) || mright.right().Is(0xFFFF)) {
- int32_t mask = mright.right().Value();
+ int32_t mask = mright.right().ResolvedValue();
*left_op = g->UseRegister(left_node);
*right_op = g->UseRegister(mright.left().node());
*opcode |= AddressingModeField::encode(
@@ -325,7 +340,7 @@ bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
Int32BinopMatcher mleft_of_right(mright.left().node());
if ((mright.right().Is(16) && mleft_of_right.right().Is(16)) ||
(mright.right().Is(24) && mleft_of_right.right().Is(24))) {
- int32_t shift = mright.right().Value();
+ int32_t shift = mright.right().ResolvedValue();
*left_op = g->UseRegister(left_node);
*right_op = g->UseRegister(mleft_of_right.left().node());
*opcode |= AddressingModeField::encode(
@@ -466,8 +481,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
// We only need at most the last 6 bits of the shift.
- inputs[input_count++] =
- g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
+ inputs[input_count++] = g.UseImmediate(
+ static_cast<int>(m_shift.right().ResolvedValue() & 0x3F));
} else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
!is_add_sub)) {
if (must_commute_cond) cont->Commute();
@@ -475,8 +490,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
// We only need at most the last 6 bits of the shift.
- inputs[input_count++] =
- g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
+ inputs[input_count++] = g.UseImmediate(
+ static_cast<int>(m_shift.right().ResolvedValue() & 0x3F));
} else {
inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(right_node);
@@ -508,12 +523,12 @@ void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
ArchOpcode negate_opcode) {
Arm64OperandGenerator g(selector);
Matcher m(node);
- if (m.right().HasValue() && (m.right().Value() < 0) &&
- (m.right().Value() > std::numeric_limits<int>::min()) &&
- g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
- selector->Emit(negate_opcode, g.DefineAsRegister(node),
- g.UseRegister(m.left().node()),
- g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
+ if (m.right().HasResolvedValue() && (m.right().ResolvedValue() < 0) &&
+ (m.right().ResolvedValue() > std::numeric_limits<int>::min()) &&
+ g.CanBeImmediate(-m.right().ResolvedValue(), kArithmeticImm)) {
+ selector->Emit(
+ negate_opcode, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(static_cast<int32_t>(-m.right().ResolvedValue())));
} else {
VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
}
@@ -525,8 +540,8 @@ void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
template <typename Matcher>
int32_t LeftShiftForReducedMultiply(Matcher* m) {
DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
- if (m->right().HasValue() && m->right().Value() >= 3) {
- uint64_t value_minus_one = m->right().Value() - 1;
+ if (m->right().HasResolvedValue() && m->right().ResolvedValue() >= 3) {
+ uint64_t value_minus_one = m->right().ResolvedValue() - 1;
if (base::bits::IsPowerOfTwo(value_minus_one)) {
return base::bits::WhichPowerOfTwo(value_minus_one);
}
@@ -565,12 +580,12 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
outputs[0] = g.DefineAsRegister(output == nullptr ? node : output);
ExternalReferenceMatcher m(base);
- if (m.HasValue() && g.IsIntegerConstant(index) &&
- selector->CanAddressRelativeToRootsRegister(m.Value())) {
+ if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
+ selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
- selector->isolate(), m.Value());
+ selector->isolate(), m.ResolvedValue());
input_count = 1;
// Check that the delta is a 32-bit integer due to the limitations of
// immediate operands.
@@ -606,55 +621,55 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
InstructionCode opcode = kArchNop;
bool require_add = false;
switch (params.transformation) {
- case LoadTransformation::kS8x16LoadSplat:
+ case LoadTransformation::kS128Load8Splat:
opcode = kArm64LoadSplat;
opcode |= MiscField::encode(8);
require_add = true;
break;
- case LoadTransformation::kS16x8LoadSplat:
+ case LoadTransformation::kS128Load16Splat:
opcode = kArm64LoadSplat;
opcode |= MiscField::encode(16);
require_add = true;
break;
- case LoadTransformation::kS32x4LoadSplat:
+ case LoadTransformation::kS128Load32Splat:
opcode = kArm64LoadSplat;
opcode |= MiscField::encode(32);
require_add = true;
break;
- case LoadTransformation::kS64x2LoadSplat:
+ case LoadTransformation::kS128Load64Splat:
opcode = kArm64LoadSplat;
opcode |= MiscField::encode(64);
require_add = true;
break;
- case LoadTransformation::kI16x8Load8x8S:
- opcode = kArm64I16x8Load8x8S;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kArm64S128Load8x8S;
break;
- case LoadTransformation::kI16x8Load8x8U:
- opcode = kArm64I16x8Load8x8U;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kArm64S128Load8x8U;
break;
- case LoadTransformation::kI32x4Load16x4S:
- opcode = kArm64I32x4Load16x4S;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kArm64S128Load16x4S;
break;
- case LoadTransformation::kI32x4Load16x4U:
- opcode = kArm64I32x4Load16x4U;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kArm64S128Load16x4U;
break;
- case LoadTransformation::kI64x2Load32x2S:
- opcode = kArm64I64x2Load32x2S;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kArm64S128Load32x2S;
break;
- case LoadTransformation::kI64x2Load32x2U:
- opcode = kArm64I64x2Load32x2U;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kArm64S128Load32x2U;
break;
- case LoadTransformation::kS128LoadMem32Zero:
- opcode = kArm64S128LoadMem32Zero;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kArm64S128Load32Zero;
break;
- case LoadTransformation::kS128LoadMem64Zero:
- opcode = kArm64S128LoadMem64Zero;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kArm64S128Load64Zero;
break;
default:
UNIMPLEMENTED();
}
// ARM64 supports unaligned loads
- DCHECK_NE(params.kind, LoadKind::kUnaligned);
+ DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -857,12 +872,12 @@ void InstructionSelector::VisitStore(Node* node) {
}
ExternalReferenceMatcher m(base);
- if (m.HasValue() && g.IsIntegerConstant(index) &&
- CanAddressRelativeToRootsRegister(m.Value())) {
+ if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
+ CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
- TurboAssemblerBase::RootRegisterOffsetForExternalReference(isolate(),
- m.Value());
+ TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ isolate(), m.ResolvedValue());
if (is_int32(delta)) {
input_count = 2;
InstructionOperand inputs[2];
@@ -981,8 +996,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
- m.right().HasValue()) {
- uint32_t mask = m.right().Value();
+ m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_width != 32) &&
@@ -993,9 +1008,9 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1F;
+ uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
// Ubfx cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -1021,8 +1036,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
- m.right().HasValue()) {
- uint64_t mask = m.right().Value();
+ m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
uint64_t mask_width = base::bits::CountPopulation(mask);
uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_width != 0) && (mask_width != 64) &&
@@ -1033,9 +1048,10 @@ void InstructionSelector::VisitWord64And(Node* node) {
// Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int64 shifts use `value % 64`.
- uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
+ uint32_t lsb =
+ static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);
// Ubfx cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -1091,12 +1107,12 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
m.right().IsInRange(1, 31)) {
Arm64OperandGenerator g(this);
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
- uint32_t mask = mleft.right().Value();
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t mask = mleft.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
- uint32_t shift = m.right().Value();
+ uint32_t shift = m.right().ResolvedValue();
DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
DCHECK_NE(0u, shift);
@@ -1174,13 +1190,14 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
// Select Ubfx or Sbfx for (x << (K & 0x1F)) OP (K & 0x1F), where
// OP is >>> or >> and (K & 0x1F) != 0.
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && m.right().HasValue() &&
- (mleft.right().Value() & 0x1F) != 0 &&
- (mleft.right().Value() & 0x1F) == (m.right().Value() & 0x1F)) {
+ if (mleft.right().HasResolvedValue() && m.right().HasResolvedValue() &&
+ (mleft.right().ResolvedValue() & 0x1F) != 0 &&
+ (mleft.right().ResolvedValue() & 0x1F) ==
+ (m.right().ResolvedValue() & 0x1F)) {
DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
- int right_val = m.right().Value() & 0x1F;
+ int right_val = m.right().ResolvedValue() & 0x1F;
DCHECK_NE(right_val, 0);
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -1196,14 +1213,15 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
- if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1F;
+ if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && mleft.right().Value() != 0) {
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
- uint32_t mask = static_cast<uint32_t>(mleft.right().Value() >> lsb)
- << lsb;
+ uint32_t mask =
+ static_cast<uint32_t>(mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
@@ -1220,13 +1238,13 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
return;
}
- if (m.left().IsUint32MulHigh() && m.right().HasValue() &&
+ if (m.left().IsUint32MulHigh() && m.right().HasResolvedValue() &&
CanCover(node, node->InputAt(0))) {
// Combine this shift with the multiply and shift that would be generated
// by Uint32MulHigh.
Arm64OperandGenerator g(this);
Node* left = m.left().node();
- int shift = m.right().Value() & 0x1F;
+ int shift = m.right().ResolvedValue() & 0x1F;
InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
g.UseRegister(left->InputAt(1)));
@@ -1240,14 +1258,15 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
- if (m.left().IsWord64And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x3F;
+ if (m.left().IsWord64And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x3F;
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && mleft.right().Value() != 0) {
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
- uint64_t mask = static_cast<uint64_t>(mleft.right().Value() >> lsb)
- << lsb;
+ uint64_t mask =
+ static_cast<uint64_t>(mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_msb + mask_width + lsb) == 64) {
@@ -1270,13 +1289,13 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
}
Int32BinopMatcher m(node);
- if (m.left().IsInt32MulHigh() && m.right().HasValue() &&
+ if (m.left().IsInt32MulHigh() && m.right().HasResolvedValue() &&
CanCover(node, node->InputAt(0))) {
// Combine this shift with the multiply and shift that would be generated
// by Int32MulHigh.
Arm64OperandGenerator g(this);
Node* left = m.left().node();
- int shift = m.right().Value() & 0x1F;
+ int shift = m.right().ResolvedValue() & 0x1F;
InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
g.UseRegister(left->InputAt(1)));
@@ -1285,7 +1304,7 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
return;
}
- if (m.left().IsInt32Add() && m.right().HasValue() &&
+ if (m.left().IsInt32Add() && m.right().HasResolvedValue() &&
CanCover(node, node->InputAt(0))) {
Node* add_node = m.left().node();
Int32BinopMatcher madd_node(add_node);
@@ -1379,14 +1398,14 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(Float64ExtractLowWord32, kArm64Float64ExtractLowWord32) \
V(Float64ExtractHighWord32, kArm64Float64ExtractHighWord32) \
V(Float64SilenceNaN, kArm64Float64SilenceNaN) \
- V(F32x4Ceil, kArm64F32x4RoundUp) \
- V(F32x4Floor, kArm64F32x4RoundDown) \
- V(F32x4Trunc, kArm64F32x4RoundTruncate) \
- V(F32x4NearestInt, kArm64F32x4RoundTiesEven) \
- V(F64x2Ceil, kArm64F64x2RoundUp) \
- V(F64x2Floor, kArm64F64x2RoundDown) \
- V(F64x2Trunc, kArm64F64x2RoundTruncate) \
- V(F64x2NearestInt, kArm64F64x2RoundTiesEven)
+ V(F32x4Ceil, kArm64Float32RoundUp) \
+ V(F32x4Floor, kArm64Float32RoundDown) \
+ V(F32x4Trunc, kArm64Float32RoundTruncate) \
+ V(F32x4NearestInt, kArm64Float32RoundTiesEven) \
+ V(F64x2Ceil, kArm64Float64RoundUp) \
+ V(F64x2Floor, kArm64Float64RoundDown) \
+ V(F64x2Trunc, kArm64Float64RoundTruncate) \
+ V(F64x2NearestInt, kArm64Float64RoundTiesEven)
#define RRR_OP_LIST(V) \
V(Int32Div, kArm64Idiv32) \
@@ -1632,6 +1651,88 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
VisitRRR(this, kArm64Mul, node);
}
+namespace {
+void VisitExtMul(InstructionSelector* selector, ArchOpcode opcode, Node* node,
+ int dst_lane_size) {
+ InstructionCode code = opcode;
+ code |= MiscField::encode(dst_lane_size);
+ VisitRRR(selector, code, node);
+}
+} // namespace
+
+void InstructionSelector::VisitI16x8ExtMulLowI8x16S(Node* node) {
+ VisitExtMul(this, kArm64Smull, node, 16);
+}
+
+void InstructionSelector::VisitI16x8ExtMulHighI8x16S(Node* node) {
+ VisitExtMul(this, kArm64Smull2, node, 16);
+}
+
+void InstructionSelector::VisitI16x8ExtMulLowI8x16U(Node* node) {
+ VisitExtMul(this, kArm64Umull, node, 16);
+}
+
+void InstructionSelector::VisitI16x8ExtMulHighI8x16U(Node* node) {
+ VisitExtMul(this, kArm64Umull2, node, 16);
+}
+
+void InstructionSelector::VisitI32x4ExtMulLowI16x8S(Node* node) {
+ VisitExtMul(this, kArm64Smull, node, 32);
+}
+
+void InstructionSelector::VisitI32x4ExtMulHighI16x8S(Node* node) {
+ VisitExtMul(this, kArm64Smull2, node, 32);
+}
+
+void InstructionSelector::VisitI32x4ExtMulLowI16x8U(Node* node) {
+ VisitExtMul(this, kArm64Umull, node, 32);
+}
+
+void InstructionSelector::VisitI32x4ExtMulHighI16x8U(Node* node) {
+ VisitExtMul(this, kArm64Umull2, node, 32);
+}
+
+void InstructionSelector::VisitI64x2ExtMulLowI32x4S(Node* node) {
+ VisitExtMul(this, kArm64Smull, node, 64);
+}
+
+void InstructionSelector::VisitI64x2ExtMulHighI32x4S(Node* node) {
+ VisitExtMul(this, kArm64Smull2, node, 64);
+}
+
+void InstructionSelector::VisitI64x2ExtMulLowI32x4U(Node* node) {
+ VisitExtMul(this, kArm64Umull, node, 64);
+}
+
+void InstructionSelector::VisitI64x2ExtMulHighI32x4U(Node* node) {
+ VisitExtMul(this, kArm64Umull2, node, 64);
+}
+
+namespace {
+void VisitExtAddPairwise(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node, int dst_lane_size) {
+ InstructionCode code = opcode;
+ code |= MiscField::encode(dst_lane_size);
+ VisitRR(selector, code, node);
+}
+} // namespace
+
+void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8S(Node* node) {
+ VisitExtAddPairwise(this, kArm64Saddlp, node, 32);
+}
+
+void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8U(Node* node) {
+ VisitExtAddPairwise(this, kArm64Uaddlp, node, 32);
+}
+
+void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16S(Node* node) {
+ VisitExtAddPairwise(this, kArm64Saddlp, node, 16);
+}
+
+void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
+ VisitExtAddPairwise(this, kArm64Uaddlp, node, 16);
+}
+
void InstructionSelector::VisitInt32MulHigh(Node* node) {
Arm64OperandGenerator g(this);
InstructionOperand const smull_operand = g.TempRegister();
@@ -1764,7 +1865,6 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
EmitLoad(this, value, opcode, immediate_mode, rep, node);
return;
@@ -1772,10 +1872,10 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
if (value->opcode() == IrOpcode::kWord32Sar && CanCover(node, value)) {
Int32BinopMatcher m(value);
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
Arm64OperandGenerator g(this);
// Mask the shift amount, to keep the same semantics as Word32Sar.
- int right = m.right().Value() & 0x1F;
+ int right = m.right().ResolvedValue() & 0x1F;
Emit(kArm64Sbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(right), g.TempImmediate(32 - right));
return;
@@ -2211,8 +2311,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
if (opcode == kArm64Cmp && !cont->IsPoisoned()) {
Int64Matcher m(right);
- if (m.HasValue()) {
- if (TryEmitCbzOrTbz<64>(selector, left, m.Value(), node,
+ if (m.HasResolvedValue()) {
+ if (TryEmitCbzOrTbz<64>(selector, left, m.ResolvedValue(), node,
cont->condition(), cont)) {
return;
}
@@ -2228,15 +2328,16 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
Int32BinopMatcher m(node);
FlagsCondition cond = cont->condition();
if (!cont->IsPoisoned()) {
- if (m.right().HasValue()) {
- if (TryEmitCbzOrTbz<32>(selector, m.left().node(), m.right().Value(),
- node, cond, cont)) {
+ if (m.right().HasResolvedValue()) {
+ if (TryEmitCbzOrTbz<32>(selector, m.left().node(),
+ m.right().ResolvedValue(), node, cond, cont)) {
return;
}
- } else if (m.left().HasValue()) {
+ } else if (m.left().HasResolvedValue()) {
FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
- if (TryEmitCbzOrTbz<32>(selector, m.right().node(), m.left().Value(),
- node, commuted_cond, cont)) {
+ if (TryEmitCbzOrTbz<32>(selector, m.right().node(),
+ m.left().ResolvedValue(), node, commuted_cond,
+ cont)) {
return;
}
}
@@ -2313,7 +2414,7 @@ struct TestAndBranchMatcher {
unsigned bit() const {
DCHECK(Matches());
- return base::bits::CountTrailingZeros(matcher_.right().Value());
+ return base::bits::CountTrailingZeros(matcher_.right().ResolvedValue());
}
Node* input() const {
@@ -2328,8 +2429,8 @@ struct TestAndBranchMatcher {
void Initialize() {
if (cont_->IsBranch() && !cont_->IsPoisoned() &&
- matcher_.right().HasValue() &&
- base::bits::IsPowerOfTwo(matcher_.right().Value())) {
+ matcher_.right().HasResolvedValue() &&
+ base::bits::IsPowerOfTwo(matcher_.right().ResolvedValue())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont_->condition() == kEqual) ||
(cont_->condition() == kNotEqual));
@@ -2967,7 +3068,7 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode =
@@ -2988,7 +3089,7 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode = kArm64Word64AtomicLoadUint8;
@@ -3010,7 +3111,7 @@ void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kWord32AtomicStoreWord8;
@@ -3029,7 +3130,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kArm64Word64AtomicStoreWord8;
@@ -3050,7 +3151,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -3064,13 +3165,12 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kArm64Word64AtomicExchangeUint8;
@@ -3082,13 +3182,12 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
opcode = kArm64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -3102,13 +3201,12 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kArm64Word64AtomicCompareExchangeUint8;
@@ -3120,7 +3218,6 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
opcode = kArm64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
@@ -3128,7 +3225,7 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -3142,7 +3239,6 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
opcode = word32_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
}
@@ -3164,7 +3260,7 @@ VISIT_ATOMIC_BINOP(Xor)
void InstructionSelector::VisitWord64AtomicBinaryOperation(
Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
ArchOpcode uint64_op) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = uint8_op;
@@ -3176,7 +3272,6 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
opcode = uint64_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
}
@@ -3223,24 +3318,14 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
V(I64x2Neg, kArm64I64x2Neg) \
V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
- V(I32x4SConvertI16x8Low, kArm64I32x4SConvertI16x8Low) \
- V(I32x4SConvertI16x8High, kArm64I32x4SConvertI16x8High) \
V(I32x4Neg, kArm64I32x4Neg) \
V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
- V(I32x4UConvertI16x8Low, kArm64I32x4UConvertI16x8Low) \
- V(I32x4UConvertI16x8High, kArm64I32x4UConvertI16x8High) \
V(I32x4Abs, kArm64I32x4Abs) \
- V(I16x8SConvertI8x16Low, kArm64I16x8SConvertI8x16Low) \
- V(I16x8SConvertI8x16High, kArm64I16x8SConvertI8x16High) \
V(I16x8Neg, kArm64I16x8Neg) \
- V(I16x8UConvertI8x16Low, kArm64I16x8UConvertI8x16Low) \
- V(I16x8UConvertI8x16High, kArm64I16x8UConvertI8x16High) \
V(I16x8Abs, kArm64I16x8Abs) \
V(I8x16Neg, kArm64I8x16Neg) \
V(I8x16Abs, kArm64I8x16Abs) \
V(S128Not, kArm64S128Not) \
- V(V64x2AnyTrue, kArm64V128AnyTrue) \
- V(V64x2AllTrue, kArm64V64x2AllTrue) \
V(V32x4AnyTrue, kArm64V128AnyTrue) \
V(V32x4AllTrue, kArm64V32x4AllTrue) \
V(V16x8AnyTrue, kArm64V128AnyTrue) \
@@ -3287,11 +3372,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2Add, kArm64I64x2Add) \
V(I64x2Sub, kArm64I64x2Sub) \
V(I64x2Eq, kArm64I64x2Eq) \
- V(I64x2Ne, kArm64I64x2Ne) \
- V(I64x2GtS, kArm64I64x2GtS) \
- V(I64x2GeS, kArm64I64x2GeS) \
- V(I64x2GtU, kArm64I64x2GtU) \
- V(I64x2GeU, kArm64I64x2GeU) \
V(I32x4AddHoriz, kArm64I32x4AddHoriz) \
V(I32x4Mul, kArm64I32x4Mul) \
V(I32x4MinS, kArm64I32x4MinS) \
@@ -3306,9 +3386,9 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4GeU, kArm64I32x4GeU) \
V(I32x4DotI16x8S, kArm64I32x4DotI16x8S) \
V(I16x8SConvertI32x4, kArm64I16x8SConvertI32x4) \
- V(I16x8AddSaturateS, kArm64I16x8AddSaturateS) \
+ V(I16x8AddSatS, kArm64I16x8AddSatS) \
V(I16x8AddHoriz, kArm64I16x8AddHoriz) \
- V(I16x8SubSaturateS, kArm64I16x8SubSaturateS) \
+ V(I16x8SubSatS, kArm64I16x8SubSatS) \
V(I16x8Mul, kArm64I16x8Mul) \
V(I16x8MinS, kArm64I16x8MinS) \
V(I16x8MaxS, kArm64I16x8MaxS) \
@@ -3317,16 +3397,17 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8GtS, kArm64I16x8GtS) \
V(I16x8GeS, kArm64I16x8GeS) \
V(I16x8UConvertI32x4, kArm64I16x8UConvertI32x4) \
- V(I16x8AddSaturateU, kArm64I16x8AddSaturateU) \
- V(I16x8SubSaturateU, kArm64I16x8SubSaturateU) \
+ V(I16x8AddSatU, kArm64I16x8AddSatU) \
+ V(I16x8SubSatU, kArm64I16x8SubSatU) \
V(I16x8MinU, kArm64I16x8MinU) \
V(I16x8MaxU, kArm64I16x8MaxU) \
V(I16x8GtU, kArm64I16x8GtU) \
V(I16x8GeU, kArm64I16x8GeU) \
V(I16x8RoundingAverageU, kArm64I16x8RoundingAverageU) \
+ V(I16x8Q15MulRSatS, kArm64I16x8Q15MulRSatS) \
V(I8x16SConvertI16x8, kArm64I8x16SConvertI16x8) \
- V(I8x16AddSaturateS, kArm64I8x16AddSaturateS) \
- V(I8x16SubSaturateS, kArm64I8x16SubSaturateS) \
+ V(I8x16AddSatS, kArm64I8x16AddSatS) \
+ V(I8x16SubSatS, kArm64I8x16SubSatS) \
V(I8x16Mul, kArm64I8x16Mul) \
V(I8x16MinS, kArm64I8x16MinS) \
V(I8x16MaxS, kArm64I8x16MaxS) \
@@ -3335,8 +3416,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16GtS, kArm64I8x16GtS) \
V(I8x16GeS, kArm64I8x16GeS) \
V(I8x16UConvertI16x8, kArm64I8x16UConvertI16x8) \
- V(I8x16AddSaturateU, kArm64I8x16AddSaturateU) \
- V(I8x16SubSaturateU, kArm64I8x16SubSaturateU) \
+ V(I8x16AddSatU, kArm64I8x16AddSatU) \
+ V(I8x16SubSatU, kArm64I8x16SubSatU) \
V(I8x16MinU, kArm64I8x16MinU) \
V(I8x16MaxU, kArm64I8x16MaxU) \
V(I8x16GtU, kArm64I8x16GtU) \
@@ -3716,6 +3797,69 @@ void InstructionSelector::VisitF64x2Pmax(Node* node) {
VisitPminOrPmax(this, kArm64F64x2Pmax, node);
}
+namespace {
+void VisitSignExtendLong(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node, int lane_size) {
+ InstructionCode code = opcode;
+ code |= MiscField::encode(lane_size);
+ VisitRR(selector, code, node);
+}
+} // namespace
+
+void InstructionSelector::VisitI64x2SConvertI32x4Low(Node* node) {
+ VisitSignExtendLong(this, kArm64Sxtl, node, 64);
+}
+
+void InstructionSelector::VisitI64x2SConvertI32x4High(Node* node) {
+ VisitSignExtendLong(this, kArm64Sxtl2, node, 64);
+}
+
+void InstructionSelector::VisitI64x2UConvertI32x4Low(Node* node) {
+ VisitSignExtendLong(this, kArm64Uxtl, node, 64);
+}
+
+void InstructionSelector::VisitI64x2UConvertI32x4High(Node* node) {
+ VisitSignExtendLong(this, kArm64Uxtl2, node, 64);
+}
+
+void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
+ VisitSignExtendLong(this, kArm64Sxtl, node, 32);
+}
+
+void InstructionSelector::VisitI32x4SConvertI16x8High(Node* node) {
+ VisitSignExtendLong(this, kArm64Sxtl2, node, 32);
+}
+
+void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
+ VisitSignExtendLong(this, kArm64Uxtl, node, 32);
+}
+
+void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
+ VisitSignExtendLong(this, kArm64Uxtl2, node, 32);
+}
+
+void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
+ VisitSignExtendLong(this, kArm64Sxtl, node, 16);
+}
+
+void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
+ VisitSignExtendLong(this, kArm64Sxtl2, node, 16);
+}
+
+void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
+ VisitSignExtendLong(this, kArm64Uxtl, node, 16);
+}
+
+void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
+ VisitSignExtendLong(this, kArm64Uxtl2, node, 16);
+}
+
+void InstructionSelector::VisitI8x16Popcnt(Node* node) {
+ InstructionCode code = kArm64Cnt;
+ code |= MiscField::encode(8);
+ VisitRR(this, code, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
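
The new ExtMul, ExtAddPairwise, sign-extend-long and Popcnt visitors above all
share one arch opcode per instruction family and carry the destination lane
size in the InstructionCode via MiscField, rather than minting an opcode per
shape. A small sketch of the packing, using only names from this patch; the
decode side is assumed to mirror the kArm64LoadSplat case, which reads the
field back with MiscField::decode(opcode):

    // Illustration only: the lane size rides along with the opcode.
    InstructionCode code = kArm64Sxtl;  // one opcode for every Sxtl variant
    code |= MiscField::encode(32);      // e.g. I32x4SConvertI16x8Low -> 32-bit lanes
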
diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index 88f82fe930..93113b97ca 100644
--- a/deps/v8/src/compiler/backend/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -257,17 +257,6 @@ class OutOfLineCode : public ZoneObject {
OutOfLineCode* const next_;
};
-inline bool HasCallDescriptorFlag(Instruction* instr,
- CallDescriptor::Flag flag) {
- STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode == 10);
-#ifdef DEBUG
- static constexpr int kInstructionCodeFlagsMask =
- ((1 << CallDescriptor::kFlagsBitsEncodedInInstructionCode) - 1);
- DCHECK_EQ(static_cast<int>(flag) & kInstructionCodeFlagsMask, flag);
-#endif
- return MiscField::decode(instr->opcode()) & flag;
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 33a80f52d0..0cb0e6172f 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -162,8 +162,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
DeoptimizeKind deopt_kind = exit->kind();
DeoptimizeReason deoptimization_reason = exit->reason();
- Address deopt_entry =
+ Builtins::Name deopt_entry =
Deoptimizer::GetDeoptimizationEntry(tasm()->isolate(), deopt_kind);
+ Label* jump_deoptimization_entry_label =
+ &jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
if (info()->source_positions()) {
tasm()->RecordDeoptReason(deoptimization_reason, exit->pos(),
deoptimization_id);
@@ -177,7 +179,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
}
tasm()->CallForDeoptimization(deopt_entry, deoptimization_id, exit->label(),
- deopt_kind);
+ deopt_kind, jump_deoptimization_entry_label);
exit->set_emitted();
return kSuccess;
}
@@ -324,7 +326,7 @@ void CodeGenerator::AssembleCode() {
// For some targets, we must make sure that constant and veneer pools are
// emitted before emitting the deoptimization exits.
- PrepareForDeoptimizationExits(static_cast<int>(deoptimization_exits_.size()));
+ PrepareForDeoptimizationExits(&deoptimization_exits_);
if (Deoptimizer::kSupportsFixedDeoptExitSizes) {
deopt_exit_start_offset_ = tasm()->pc_offset();
@@ -338,7 +340,7 @@ void CodeGenerator::AssembleCode() {
// Deoptimizer::kSupportsFixedDeoptExitSizes is true, lazy deopts
// might need additional instructions.
auto cmp = [](const DeoptimizationExit* a, const DeoptimizationExit* b) {
- static_assert(DeoptimizeKind::kLazy == DeoptimizeKind::kLastDeoptimizeKind,
+ static_assert(DeoptimizeKind::kLazy == kLastDeoptimizeKind,
"lazy deopts are expected to be emitted last");
if (a->kind() != b->kind()) {
return a->kind() < b->kind();
@@ -391,6 +393,9 @@ void CodeGenerator::AssembleCode() {
// size as reported by perf.
unwinding_info_writer_.Finish(tasm()->pc_offset());
+ // Final alignment before starting on the metadata section.
+ tasm()->Align(Code::kMetadataAlignment);
+
safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
// Emit the exception handler table.
@@ -517,8 +522,9 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
CHECK_IMPLIES(info()->IsNativeContextIndependent(),
code->IsNativeContextIndependent(isolate()));
+ // Counts both compiled code and metadata.
isolate()->counters()->total_compiled_code_size()->Increment(
- code->raw_instruction_size());
+ code->raw_body_size());
LOG_CODE_EVENT(isolate(),
CodeLinePosInfoRecordEvent(code->raw_instruction_start(),
@@ -974,12 +980,12 @@ Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
void CodeGenerator::RecordCallPosition(Instruction* instr) {
const bool needs_frame_state =
- HasCallDescriptorFlag(instr, CallDescriptor::kNeedsFrameState);
+ instr->HasCallDescriptorFlag(CallDescriptor::kNeedsFrameState);
RecordSafepoint(instr->reference_map(), needs_frame_state
? Safepoint::kLazyDeopt
: Safepoint::kNoLazyDeopt);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kHasExceptionHandler)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler)) {
InstructionOperandConverter i(this, instr);
RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 26d03f129a..6181bc7d15 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -406,7 +406,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
InstructionOperand* op, MachineType type);
void MarkLazyDeoptSite();
- void PrepareForDeoptimizationExits(int deopt_count);
+ void PrepareForDeoptimizationExits(ZoneDeque<DeoptimizationExit*>* exits);
DeoptimizationExit* AddDeoptimizationExit(Instruction* instr,
size_t frame_state_offset);
@@ -446,6 +446,14 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
int handler_table_offset_ = 0;
int last_lazy_deopt_pc_ = 0;
+ // Deoptimization exits must be as small as possible, since their count grows
+ // with function size. {jump_deoptimization_entry_labels_} is an optimization
+ // to that effect, which extracts the (potentially large) instruction
+ // sequence for the final jump to the deoptimization entry into a single spot
+ // per Code object. All deopt exits can then near-call to this label. Note:
+ // not used on all architectures.
+ Label jump_deoptimization_entry_labels_[kDeoptimizeKindCount];
+
// The maximal combined height of all frames produced upon deoptimization, and
// the maximal number of pushed arguments for function calls. Applied as an
// offset to the first stack check of an optimized function.
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 077324a31f..1820e39799 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -695,10 +695,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
__ call(reg);
@@ -723,7 +723,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
__ wasm_call(wasm_code, constant.rmode());
} else {
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(wasm_code, constant.rmode());
} else {
__ call(wasm_code, constant.rmode());
@@ -731,7 +731,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
} else {
Register reg = i.InputRegister(0);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
__ call(reg);
@@ -753,10 +753,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
@@ -773,7 +773,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ jmp(wasm_code, constant.rmode());
} else {
Register reg = i.InputRegister(0);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
@@ -787,9 +787,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
@@ -927,8 +927,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
+ __ jmp(exit->label());
break;
}
case kArchRet:
@@ -2208,18 +2207,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEF32x4Abs: {
XMMRegister dst = i.OutputSimd128Register();
- Operand src = i.InputOperand(0);
- if (src.is_reg(dst)) {
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrld(kScratchDoubleReg, 1);
- __ andps(dst, kScratchDoubleReg);
- } else {
- // TODO(zhin) Improve codegen for this case.
- __ pcmpeqd(dst, dst);
- __ movups(kScratchDoubleReg, src);
- __ psrld(dst, 1);
- __ andps(dst, kScratchDoubleReg);
- }
+ DCHECK_EQ(i.InputSimd128Register(0), dst);
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrld(kScratchDoubleReg, 1);
+ __ andps(dst, kScratchDoubleReg);
break;
}
case kAVXF32x4Abs: {
@@ -2232,18 +2223,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEF32x4Neg: {
XMMRegister dst = i.OutputSimd128Register();
- Operand src = i.InputOperand(0);
- if (src.is_reg(dst)) {
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ pslld(kScratchDoubleReg, 31);
- __ xorps(dst, kScratchDoubleReg);
- } else {
- // TODO(zhin) Improve codegen for this case.
- __ pcmpeqd(dst, dst);
- __ movups(kScratchDoubleReg, src);
- __ pslld(dst, 31);
- __ xorps(dst, kScratchDoubleReg);
- }
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pslld(kScratchDoubleReg, 31);
+ __ xorps(dst, kScratchDoubleReg);
break;
}
case kAVXF32x4Neg: {
@@ -2255,9 +2238,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEF32x4Sqrt: {
- // TODO(zhin) Improve codegen for this case.
- __ movups(kScratchDoubleReg, i.InputOperand(0));
- __ sqrtps(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kAVXF32x4Sqrt: {
@@ -2882,7 +2863,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEI16x8SConvertI32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ packssdw(i.OutputSimd128Register(), i.InputOperand(1));
+ __ packssdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kAVXI16x8SConvertI32x4: {
@@ -2902,12 +2883,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEI16x8AddSaturateS: {
+ case kSSEI16x8AddSatS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddsw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI16x8AddSaturateS: {
+ case kAVXI16x8AddSatS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
@@ -2936,12 +2917,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEI16x8SubSaturateS: {
+ case kSSEI16x8SubSatS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubsw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI16x8SubSaturateS: {
+ case kAVXI16x8SubSatS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
@@ -3051,33 +3032,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEI16x8UConvertI32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ packusdw(i.OutputSimd128Register(), i.InputOperand(1));
+ __ packusdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kAVXI16x8UConvertI32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
- __ vpackusdw(dst, dst, i.InputOperand(1));
+ __ vpackusdw(dst, dst, i.InputSimd128Register(1));
break;
}
- case kSSEI16x8AddSaturateU: {
+ case kSSEI16x8AddSatU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddusw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI16x8AddSaturateU: {
+ case kAVXI16x8AddSatU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8SubSaturateU: {
+ case kSSEI16x8SubSatU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubusw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI16x8SubSaturateU: {
+ case kAVXI16x8SubSatU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
@@ -3290,12 +3271,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEI8x16AddSaturateS: {
+ case kSSEI8x16AddSatS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddsb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI8x16AddSaturateS: {
+ case kAVXI8x16AddSatS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
@@ -3312,12 +3293,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEI8x16SubSaturateS: {
+ case kSSEI8x16SubSatS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubsb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI8x16SubSaturateS: {
+ case kAVXI8x16SubSatS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
@@ -3495,23 +3476,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpackuswb(dst, dst, i.InputOperand(1));
break;
}
- case kSSEI8x16AddSaturateU: {
+ case kSSEI8x16AddSatU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddusb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI8x16AddSaturateU: {
+ case kAVXI8x16AddSatU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI8x16SubSaturateU: {
+ case kSSEI8x16SubSatU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubusb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI8x16SubSaturateU: {
+ case kAVXI8x16SubSatU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
@@ -3645,16 +3626,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSES128Not: {
XMMRegister dst = i.OutputSimd128Register();
- Operand src = i.InputOperand(0);
- if (src.is_reg(dst)) {
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ pxor(dst, kScratchDoubleReg);
- } else {
- // TODO(zhin) Improve codegen for this case.
- __ pcmpeqd(dst, dst);
- __ movups(kScratchDoubleReg, src);
- __ pxor(dst, kScratchDoubleReg);
- }
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
break;
}
case kAVXS128Not: {
@@ -3781,48 +3755,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(esp, tmp);
break;
}
- case kIA32S8x16LoadSplat: {
+ case kIA32S128Load8Splat: {
__ Pinsrb(i.OutputSimd128Register(), i.MemoryOperand(), 0);
__ Pxor(kScratchDoubleReg, kScratchDoubleReg);
__ Pshufb(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
- case kIA32S16x8LoadSplat: {
+ case kIA32S128Load16Splat: {
__ Pinsrw(i.OutputSimd128Register(), i.MemoryOperand(), 0);
__ Pshuflw(i.OutputSimd128Register(), i.OutputSimd128Register(),
uint8_t{0});
__ Punpcklqdq(i.OutputSimd128Register(), i.OutputSimd128Register());
break;
}
- case kIA32S32x4LoadSplat: {
+ case kIA32S128Load32Splat: {
__ Vbroadcastss(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kIA32S64x2LoadSplat: {
+ case kIA32S128Load64Splat: {
__ Movddup(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kIA32I16x8Load8x8S: {
+ case kIA32S128Load8x8S: {
__ Pmovsxbw(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kIA32I16x8Load8x8U: {
+ case kIA32S128Load8x8U: {
__ Pmovzxbw(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kIA32I32x4Load16x4S: {
+ case kIA32S128Load16x4S: {
__ Pmovsxwd(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kIA32I32x4Load16x4U: {
+ case kIA32S128Load16x4U: {
__ Pmovzxwd(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kIA32I64x2Load32x2S: {
+ case kIA32S128Load32x2S: {
__ Pmovsxdq(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kIA32I64x2Load32x2U: {
+ case kIA32S128Load32x2U: {
__ Pmovzxdq(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
@@ -4795,7 +4769,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4811,37 +4785,86 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
}
- // Might need ecx for scratch if pop_size is too big or if there is a variable
- // pop count.
+ // We might need ecx and edx for scratch.
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & edx.bit());
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & ecx.bit());
- size_t pop_size = call_descriptor->StackParameterCount() * kSystemPointerSize;
IA32OperandConverter g(this, nullptr);
+ int parameter_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
+
+  // {additional_pop_count} is only greater than zero if {parameter_count} == 0.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_count != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (__ emit_debug_code()) {
+ __ cmp(g.ToRegister(additional_pop_count), Immediate(0));
+ __ Assert(equal, AbortReason::kUnexpectedAdditionalPopValue);
+ }
+ }
+
+ Register argc_reg = ecx;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // Functions with JS linkage have at least one parameter (the receiver).
+  // If {parameter_count} == 0, it means it is a builtin with
+  // kDontAdaptArgumentsSentinel, which takes care of popping its JS
+  // arguments itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_count != 0;
+#else
+ const bool drop_jsargs = false;
+#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now if they always have the same
// number of return args.
- if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
if (return_label_.is_bound()) {
__ jmp(&return_label_);
return;
} else {
__ bind(&return_label_);
- AssembleDeconstructFrame();
}
- } else {
- AssembleDeconstructFrame();
}
+ if (drop_jsargs) {
+ // Get the actual argument count.
+ __ mov(argc_reg, Operand(ebp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
}
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & edx.bit());
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & ecx.bit());
- if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
- pop_size += g.ToConstant(pop).ToInt32() * kSystemPointerSize;
- __ Ret(static_cast<int>(pop_size), ecx);
+
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ int parameter_count_without_receiver =
+ parameter_count - 1; // Exclude the receiver to simplify the
+ // computation. We'll account for it at the end.
+ Label mismatch_return;
+ Register scratch_reg = edx;
+ DCHECK_NE(argc_reg, scratch_reg);
+ __ cmp(argc_reg, Immediate(parameter_count_without_receiver));
+ __ j(greater, &mismatch_return, Label::kNear);
+ __ Ret(parameter_count * kSystemPointerSize, scratch_reg);
+ __ bind(&mismatch_return);
+ __ PopReturnAddressTo(scratch_reg);
+ __ lea(esp, Operand(esp, argc_reg, times_system_pointer_size,
+ kSystemPointerSize)); // Also pop the receiver.
+ // We use a return instead of a jump for better return address prediction.
+ __ PushReturnAddressFrom(scratch_reg);
+ __ Ret();
+ } else if (additional_pop_count->IsImmediate()) {
+ Register scratch_reg = ecx;
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ size_t pop_size = (parameter_count + additional_count) * kSystemPointerSize;
+ CHECK_LE(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
+ __ Ret(static_cast<int>(pop_size), scratch_reg);
} else {
- Register pop_reg = g.ToRegister(pop);
+ Register pop_reg = g.ToRegister(additional_pop_count);
Register scratch_reg = pop_reg == ecx ? edx : ecx;
+ int pop_size = static_cast<int>(parameter_count * kSystemPointerSize);
__ PopReturnAddressTo(scratch_reg);
__ lea(esp, Operand(esp, pop_reg, times_system_pointer_size,
static_cast<int>(pop_size)));
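The drop_jsargs return sequence above pops max(1 + argc_reg, parameter_count) stack slots: whichever is larger between the actual arguments (plus the receiver) and the formal parameter count. A stand-alone sketch of that computation, assuming ia32's 4-byte pointers; the names are illustrative, not V8 API:

#include <algorithm>
#include <cstdio>

constexpr int kSystemPointerSize = 4;  // ia32

// Bytes to pop on return when the callee drops its own JS arguments.
// {parameter_count} includes the receiver; {actual_argc} does not.
int BytesToPop(int actual_argc, int parameter_count) {
  int slots = std::max(actual_argc + 1, parameter_count);
  return slots * kSystemPointerSize;
}

int main() {
  // Formal parameter count 3 (receiver + 2 formals).
  std::printf("%d\n", BytesToPop(1, 3));  // under-application: pops 12 bytes
  std::printf("%d\n", BytesToPop(5, 3));  // over-application: pops 24 bytes
  return 0;
}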
@@ -4852,7 +4875,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
void CodeGenerator::FinishCode() {}
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index eca9dc9227..a56486479d 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -249,14 +249,14 @@ namespace compiler {
V(AVXI16x8SConvertI32x4) \
V(SSEI16x8Add) \
V(AVXI16x8Add) \
- V(SSEI16x8AddSaturateS) \
- V(AVXI16x8AddSaturateS) \
+ V(SSEI16x8AddSatS) \
+ V(AVXI16x8AddSatS) \
V(SSEI16x8AddHoriz) \
V(AVXI16x8AddHoriz) \
V(SSEI16x8Sub) \
V(AVXI16x8Sub) \
- V(SSEI16x8SubSaturateS) \
- V(AVXI16x8SubSaturateS) \
+ V(SSEI16x8SubSatS) \
+ V(AVXI16x8SubSatS) \
V(SSEI16x8Mul) \
V(AVXI16x8Mul) \
V(SSEI16x8MinS) \
@@ -276,10 +276,10 @@ namespace compiler {
V(IA32I16x8ShrU) \
V(SSEI16x8UConvertI32x4) \
V(AVXI16x8UConvertI32x4) \
- V(SSEI16x8AddSaturateU) \
- V(AVXI16x8AddSaturateU) \
- V(SSEI16x8SubSaturateU) \
- V(AVXI16x8SubSaturateU) \
+ V(SSEI16x8AddSatU) \
+ V(AVXI16x8AddSatU) \
+ V(SSEI16x8SubSatU) \
+ V(AVXI16x8SubSatU) \
V(SSEI16x8MinU) \
V(AVXI16x8MinU) \
V(SSEI16x8MaxU) \
@@ -303,12 +303,12 @@ namespace compiler {
V(IA32I8x16ShrS) \
V(SSEI8x16Add) \
V(AVXI8x16Add) \
- V(SSEI8x16AddSaturateS) \
- V(AVXI8x16AddSaturateS) \
+ V(SSEI8x16AddSatS) \
+ V(AVXI8x16AddSatS) \
V(SSEI8x16Sub) \
V(AVXI8x16Sub) \
- V(SSEI8x16SubSaturateS) \
- V(AVXI8x16SubSaturateS) \
+ V(SSEI8x16SubSatS) \
+ V(AVXI8x16SubSatS) \
V(SSEI8x16Mul) \
V(AVXI8x16Mul) \
V(SSEI8x16MinS) \
@@ -325,10 +325,10 @@ namespace compiler {
V(AVXI8x16GeS) \
V(SSEI8x16UConvertI16x8) \
V(AVXI8x16UConvertI16x8) \
- V(SSEI8x16AddSaturateU) \
- V(AVXI8x16AddSaturateU) \
- V(SSEI8x16SubSaturateU) \
- V(AVXI8x16SubSaturateU) \
+ V(SSEI8x16AddSatU) \
+ V(AVXI8x16AddSatU) \
+ V(SSEI8x16SubSatU) \
+ V(AVXI8x16SubSatU) \
V(IA32I8x16ShrU) \
V(SSEI8x16MinU) \
V(AVXI8x16MinU) \
@@ -357,16 +357,16 @@ namespace compiler {
V(IA32S128AndNot) \
V(IA32I8x16Swizzle) \
V(IA32I8x16Shuffle) \
- V(IA32S8x16LoadSplat) \
- V(IA32S16x8LoadSplat) \
- V(IA32S32x4LoadSplat) \
- V(IA32S64x2LoadSplat) \
- V(IA32I16x8Load8x8S) \
- V(IA32I16x8Load8x8U) \
- V(IA32I32x4Load16x4S) \
- V(IA32I32x4Load16x4U) \
- V(IA32I64x2Load32x2S) \
- V(IA32I64x2Load32x2U) \
+ V(IA32S128Load8Splat) \
+ V(IA32S128Load16Splat) \
+ V(IA32S128Load32Splat) \
+ V(IA32S128Load64Splat) \
+ V(IA32S128Load8x8S) \
+ V(IA32S128Load8x8U) \
+ V(IA32S128Load16x4S) \
+ V(IA32S128Load16x4U) \
+ V(IA32S128Load32x2S) \
+ V(IA32S128Load32x2U) \
V(IA32S32x4Swizzle) \
V(IA32S32x4Shuffle) \
V(IA32S16x8Blend) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 24abd58c7f..c8f3b19d0f 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -230,14 +230,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI16x8SConvertI32x4:
case kSSEI16x8Add:
case kAVXI16x8Add:
- case kSSEI16x8AddSaturateS:
- case kAVXI16x8AddSaturateS:
+ case kSSEI16x8AddSatS:
+ case kAVXI16x8AddSatS:
case kSSEI16x8AddHoriz:
case kAVXI16x8AddHoriz:
case kSSEI16x8Sub:
case kAVXI16x8Sub:
- case kSSEI16x8SubSaturateS:
- case kAVXI16x8SubSaturateS:
+ case kSSEI16x8SubSatS:
+ case kAVXI16x8SubSatS:
case kSSEI16x8Mul:
case kAVXI16x8Mul:
case kSSEI16x8MinS:
@@ -257,10 +257,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I16x8ShrU:
case kSSEI16x8UConvertI32x4:
case kAVXI16x8UConvertI32x4:
- case kSSEI16x8AddSaturateU:
- case kAVXI16x8AddSaturateU:
- case kSSEI16x8SubSaturateU:
- case kAVXI16x8SubSaturateU:
+ case kSSEI16x8AddSatU:
+ case kAVXI16x8AddSatU:
+ case kSSEI16x8SubSatU:
+ case kAVXI16x8SubSatU:
case kSSEI16x8MinU:
case kAVXI16x8MinU:
case kSSEI16x8MaxU:
@@ -284,12 +284,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I8x16ShrS:
case kSSEI8x16Add:
case kAVXI8x16Add:
- case kSSEI8x16AddSaturateS:
- case kAVXI8x16AddSaturateS:
+ case kSSEI8x16AddSatS:
+ case kAVXI8x16AddSatS:
case kSSEI8x16Sub:
case kAVXI8x16Sub:
- case kSSEI8x16SubSaturateS:
- case kAVXI8x16SubSaturateS:
+ case kSSEI8x16SubSatS:
+ case kAVXI8x16SubSatS:
case kSSEI8x16Mul:
case kAVXI8x16Mul:
case kSSEI8x16MinS:
@@ -306,10 +306,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI8x16GeS:
case kSSEI8x16UConvertI16x8:
case kAVXI8x16UConvertI16x8:
- case kSSEI8x16AddSaturateU:
- case kAVXI8x16AddSaturateU:
- case kSSEI8x16SubSaturateU:
- case kAVXI8x16SubSaturateU:
+ case kSSEI8x16AddSatU:
+ case kAVXI8x16AddSatU:
+ case kSSEI8x16SubSatU:
+ case kAVXI8x16SubSatU:
case kIA32I8x16ShrU:
case kSSEI8x16MinU:
case kAVXI8x16MinU:
@@ -399,16 +399,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Movsd:
case kIA32Movdqu:
// Moves are used for memory load/store operations.
- case kIA32S8x16LoadSplat:
- case kIA32S16x8LoadSplat:
- case kIA32S32x4LoadSplat:
- case kIA32S64x2LoadSplat:
- case kIA32I16x8Load8x8S:
- case kIA32I16x8Load8x8U:
- case kIA32I32x4Load16x4S:
- case kIA32I32x4Load16x4U:
- case kIA32I64x2Load32x2S:
- case kIA32I64x2Load32x2U:
+ case kIA32S128Load8Splat:
+ case kIA32S128Load16Splat:
+ case kIA32S128Load32Splat:
+ case kIA32S128Load64Splat:
+ case kIA32S128Load8x8S:
+ case kIA32S128Load8x8U:
+ case kIA32S128Load16x4S:
+ case kIA32S128Load16x4U:
+ case kIA32S128Load32x2S:
+ case kIA32S128Load32x2U:
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kIA32Peek:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index fec4053871..c16584a195 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -162,12 +162,13 @@ class IA32OperandGenerator final : public OperandGenerator {
RegisterMode register_mode = kRegister) {
{
LoadMatcher<ExternalReferenceMatcher> m(node);
- if (m.index().HasValue() && m.object().HasValue() &&
- selector()->CanAddressRelativeToRootsRegister(m.object().Value())) {
+ if (m.index().HasResolvedValue() && m.object().HasResolvedValue() &&
+ selector()->CanAddressRelativeToRootsRegister(
+ m.object().ResolvedValue())) {
ptrdiff_t const delta =
- m.index().Value() +
+ m.index().ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
- selector()->isolate(), m.object().Value());
+ selector()->isolate(), m.object().ResolvedValue());
if (is_int32(delta)) {
inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
return kMode_Root;
@@ -364,46 +365,52 @@ void InstructionSelector::VisitAbortCSAAssert(Node* node) {
void InstructionSelector::VisitLoadTransform(Node* node) {
LoadTransformParameters params = LoadTransformParametersOf(node->op());
- InstructionCode opcode = kArchNop;
+ InstructionCode opcode;
switch (params.transformation) {
- case LoadTransformation::kS8x16LoadSplat:
- opcode = kIA32S8x16LoadSplat;
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kIA32S128Load8Splat;
break;
- case LoadTransformation::kS16x8LoadSplat:
- opcode = kIA32S16x8LoadSplat;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kIA32S128Load16Splat;
break;
- case LoadTransformation::kS32x4LoadSplat:
- opcode = kIA32S32x4LoadSplat;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kIA32S128Load32Splat;
break;
- case LoadTransformation::kS64x2LoadSplat:
- opcode = kIA32S64x2LoadSplat;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kIA32S128Load64Splat;
break;
- case LoadTransformation::kI16x8Load8x8S:
- opcode = kIA32I16x8Load8x8S;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kIA32S128Load8x8S;
break;
- case LoadTransformation::kI16x8Load8x8U:
- opcode = kIA32I16x8Load8x8U;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kIA32S128Load8x8U;
break;
- case LoadTransformation::kI32x4Load16x4S:
- opcode = kIA32I32x4Load16x4S;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kIA32S128Load16x4S;
break;
- case LoadTransformation::kI32x4Load16x4U:
- opcode = kIA32I32x4Load16x4U;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kIA32S128Load16x4U;
break;
- case LoadTransformation::kI64x2Load32x2S:
- opcode = kIA32I64x2Load32x2S;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kIA32S128Load32x2S;
break;
- case LoadTransformation::kI64x2Load32x2U:
- opcode = kIA32I64x2Load32x2U;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kIA32S128Load32x2U;
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kIA32Movss;
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kIA32Movsd;
break;
default:
UNREACHABLE();
}
// IA32 supports unaligned loads.
- DCHECK_NE(params.kind, LoadKind::kUnaligned);
+ DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
// Trap handler is not supported on IA32.
- DCHECK_NE(params.kind, LoadKind::kProtected);
+ DCHECK_NE(params.kind, MemoryAccessKind::kProtected);
IA32OperandGenerator g(this);
InstructionOperand outputs[1];
@@ -419,7 +426,7 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kIA32Movss;
@@ -503,7 +510,7 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kIA32Movss;
@@ -532,7 +539,6 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
InstructionOperand val;
@@ -1779,7 +1785,8 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
Float64Matcher mleft(left);
- if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
+ if (mleft.HasResolvedValue() &&
+ (bit_cast<uint64_t>(mleft.ResolvedValue()) >> 32) == 0u) {
Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
return;
}
@@ -1818,7 +1825,7 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
IA32OperandGenerator g(this);
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kWord32AtomicExchangeInt8;
@@ -1838,7 +1845,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
IA32OperandGenerator g(this);
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -1851,7 +1858,6 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode, type.representation());
}
@@ -1864,7 +1870,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* new_value = node->InputAt(3);
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -1877,7 +1883,6 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode;
InstructionOperand new_val_operand =
@@ -1896,7 +1901,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = int8_op;
} else if (type == MachineType::Uint8()) {
@@ -1909,7 +1914,6 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
opcode = word32_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinOp(this, node, opcode, type.representation());
}
@@ -2079,10 +2083,10 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I32x4GeU) \
V(I16x8SConvertI32x4) \
V(I16x8Add) \
- V(I16x8AddSaturateS) \
+ V(I16x8AddSatS) \
V(I16x8AddHoriz) \
V(I16x8Sub) \
- V(I16x8SubSaturateS) \
+ V(I16x8SubSatS) \
V(I16x8Mul) \
V(I16x8MinS) \
V(I16x8MaxS) \
@@ -2090,25 +2094,25 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8Ne) \
V(I16x8GtS) \
V(I16x8GeS) \
- V(I16x8AddSaturateU) \
- V(I16x8SubSaturateU) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
V(I16x8MinU) \
V(I16x8MaxU) \
V(I16x8GtU) \
V(I16x8GeU) \
V(I8x16SConvertI16x8) \
V(I8x16Add) \
- V(I8x16AddSaturateS) \
+ V(I8x16AddSatS) \
V(I8x16Sub) \
- V(I8x16SubSaturateS) \
+ V(I8x16SubSatS) \
V(I8x16MinS) \
V(I8x16MaxS) \
V(I8x16Eq) \
V(I8x16Ne) \
V(I8x16GtS) \
V(I8x16GeS) \
- V(I8x16AddSaturateU) \
- V(I8x16SubSaturateU) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
V(I8x16MinU) \
V(I8x16MaxU) \
V(I8x16GtU) \
@@ -2234,9 +2238,15 @@ void InstructionSelector::VisitF64x2ExtractLane(Node* node) {
void InstructionSelector::VisitI64x2SplatI32Pair(Node* node) {
IA32OperandGenerator g(this);
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.Use(node->InputAt(1));
- Emit(kIA32I64x2SplatI32Pair, g.DefineAsRegister(node), operand0, operand1);
+ Int32Matcher match_left(node->InputAt(0));
+ Int32Matcher match_right(node->InputAt(1));
+ if (match_left.Is(0) && match_right.Is(0)) {
+ Emit(kIA32S128Zero, g.DefineAsRegister(node));
+ } else {
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.Use(node->InputAt(1));
+ Emit(kIA32I64x2SplatI32Pair, g.DefineAsRegister(node), operand0, operand1);
+ }
}
void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {
@@ -2333,6 +2343,7 @@ void InstructionSelector::VisitS128Select(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand operand2 = g.UseRegister(node->InputAt(2));
if (IsSupported(AVX)) {
+ // AVX supports unaligned memory operands, so Use here is okay.
Emit(kAVXS128Select, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
g.Use(node->InputAt(1)), operand2);
} else {
@@ -2351,7 +2362,13 @@ void InstructionSelector::VisitS128AndNot(Node* node) {
#define VISIT_SIMD_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
- VisitRO(this, node, kIA32##Type##Splat); \
+ Int32Matcher int32_matcher(node->InputAt(0)); \
+ if (int32_matcher.Is(0)) { \
+ IA32OperandGenerator g(this); \
+ Emit(kIA32S128Zero, g.DefineAsRegister(node)); \
+ } else { \
+ VisitRO(this, node, kIA32##Type##Splat); \
+ } \
}
SIMD_INT_TYPES(VISIT_SIMD_SPLAT)
#undef VISIT_SIMD_SPLAT
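The splat visitors above special-case a constant-zero input and emit kIA32S128Zero, because materializing an all-zero vector needs no broadcast at all. Roughly what that buys, shown with SSE2 intrinsics rather than V8's emitters:

#include <emmintrin.h>

// Splat of the constant 0: a single pxor of a register with itself yields the
// all-zero vector and carries no dependency on any input register.
__m128i SplatZero() { return _mm_setzero_si128(); }

// A general i32x4 splat must move the value into an XMM register and then
// broadcast it (movd + pshufd on plain SSE2).
__m128i SplatI32(int value) { return _mm_set1_epi32(value); }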
@@ -2431,11 +2448,20 @@ SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
#undef VISIT_SIMD_UNOP
#undef SIMD_UNOP_LIST
+// TODO(v8:9198): SSE instructions that read 16 bytes from memory require the
+// operand to be 16-byte aligned. AVX instructions relax this requirement, but
+// might have reduced performance if the memory crosses a cache line. Since we
+// have a limited number of xmm registers, keeping memory operands for AVX
+// might still be okay to alleviate register pressure.
#define VISIT_SIMD_UNOP_PREFIX(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
IA32OperandGenerator g(this); \
- InstructionCode opcode = IsSupported(AVX) ? kAVX##Opcode : kSSE##Opcode; \
- Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
+ if (IsSupported(AVX)) { \
+ Emit(kAVX##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
+ } else { \
+ Emit(kSSE##Opcode, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0))); \
+ } \
}
SIMD_UNOP_PREFIX_LIST(VISIT_SIMD_UNOP_PREFIX)
#undef VISIT_SIMD_UNOP_PREFIX
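The SSE arm above ties the output to the first input (DefineSameAsFirst) and forces the input into a register, since legacy SSE encodings are destructive two-operand forms whose memory operands must be 16-byte aligned; the AVX arm keeps g.Use(). For illustration, the same sign-bit masking the code generator emits for f32x4.abs (pcmpeqd/psrld/andps), written with intrinsics:

#include <emmintrin.h>

// f32x4.abs: build the 0x7fffffff mask in a scratch value and AND it into the
// input, clearing the sign bit of every lane.
__m128 F32x4Abs(__m128 v) {
  __m128i all_ones = _mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128());
  __m128 mask = _mm_castsi128_ps(_mm_srli_epi32(all_ones, 1));
  return _mm_and_ps(v, mask);
}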
@@ -2479,11 +2505,15 @@ SIMD_BINOP_UNIFIED_SSE_AVX_LIST(VISIT_SIMD_BINOP_UNIFIED_SSE_AVX)
#undef VISIT_SIMD_BINOP_UNIFIED_SSE_AVX
#undef SIMD_BINOP_UNIFIED_SSE_AVX_LIST
+// TODO(v8:9198): SSE requires operand1 to be a register because we do not
+// have guaranteed memory alignment yet. For AVX, memory operands are fine,
+// but can have performance issues if not aligned to 16/32 bytes (depending
+// on load size); see SDM Vol. 1, Chapter 14.9.
void VisitPack(InstructionSelector* selector, Node* node, ArchOpcode avx_opcode,
ArchOpcode sse_opcode) {
IA32OperandGenerator g(selector);
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.Use(node->InputAt(1));
+ InstructionOperand operand1 = g.UseRegister(node->InputAt(1));
if (selector->IsSupported(AVX)) {
selector->Emit(avx_opcode, g.DefineSameAsFirst(node), operand0, operand1);
} else {
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 8772a78df0..f9e68cea57 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -63,104 +63,108 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define COMMON_ARCH_OPCODE_LIST(V) \
- /* Tail call opcodes are grouped together to make IsTailCall fast */ \
- V(ArchTailCallCodeObjectFromJSFunction) \
- V(ArchTailCallCodeObject) \
- V(ArchTailCallAddress) \
- V(ArchTailCallWasm) \
- /* Update IsTailCall if further TailCall opcodes are added */ \
- \
- V(ArchCallCodeObject) \
- V(ArchCallJSFunction) \
- V(ArchPrepareCallCFunction) \
- V(ArchSaveCallerRegisters) \
- V(ArchRestoreCallerRegisters) \
- V(ArchCallCFunction) \
- V(ArchPrepareTailCall) \
- V(ArchCallWasmFunction) \
- V(ArchCallBuiltinPointer) \
- V(ArchJmp) \
- V(ArchBinarySearchSwitch) \
- V(ArchTableSwitch) \
- V(ArchNop) \
- V(ArchAbortCSAAssert) \
- V(ArchDebugBreak) \
- V(ArchComment) \
- V(ArchThrowTerminator) \
- V(ArchDeoptimize) \
- V(ArchRet) \
- V(ArchFramePointer) \
- V(ArchParentFramePointer) \
- V(ArchTruncateDoubleToI) \
- V(ArchStoreWithWriteBarrier) \
- V(ArchStackSlot) \
- V(ArchWordPoisonOnSpeculation) \
- V(ArchStackPointerGreaterThan) \
- V(ArchStackCheckOffset) \
- V(Word32AtomicLoadInt8) \
- V(Word32AtomicLoadUint8) \
- V(Word32AtomicLoadInt16) \
- V(Word32AtomicLoadUint16) \
- V(Word32AtomicLoadWord32) \
- V(Word32AtomicStoreWord8) \
- V(Word32AtomicStoreWord16) \
- V(Word32AtomicStoreWord32) \
- V(Word32AtomicExchangeInt8) \
- V(Word32AtomicExchangeUint8) \
- V(Word32AtomicExchangeInt16) \
- V(Word32AtomicExchangeUint16) \
- V(Word32AtomicExchangeWord32) \
- V(Word32AtomicCompareExchangeInt8) \
- V(Word32AtomicCompareExchangeUint8) \
- V(Word32AtomicCompareExchangeInt16) \
- V(Word32AtomicCompareExchangeUint16) \
- V(Word32AtomicCompareExchangeWord32) \
- V(Word32AtomicAddInt8) \
- V(Word32AtomicAddUint8) \
- V(Word32AtomicAddInt16) \
- V(Word32AtomicAddUint16) \
- V(Word32AtomicAddWord32) \
- V(Word32AtomicSubInt8) \
- V(Word32AtomicSubUint8) \
- V(Word32AtomicSubInt16) \
- V(Word32AtomicSubUint16) \
- V(Word32AtomicSubWord32) \
- V(Word32AtomicAndInt8) \
- V(Word32AtomicAndUint8) \
- V(Word32AtomicAndInt16) \
- V(Word32AtomicAndUint16) \
- V(Word32AtomicAndWord32) \
- V(Word32AtomicOrInt8) \
- V(Word32AtomicOrUint8) \
- V(Word32AtomicOrInt16) \
- V(Word32AtomicOrUint16) \
- V(Word32AtomicOrWord32) \
- V(Word32AtomicXorInt8) \
- V(Word32AtomicXorUint8) \
- V(Word32AtomicXorInt16) \
- V(Word32AtomicXorUint16) \
- V(Word32AtomicXorWord32) \
- V(Ieee754Float64Acos) \
- V(Ieee754Float64Acosh) \
- V(Ieee754Float64Asin) \
- V(Ieee754Float64Asinh) \
- V(Ieee754Float64Atan) \
- V(Ieee754Float64Atanh) \
- V(Ieee754Float64Atan2) \
- V(Ieee754Float64Cbrt) \
- V(Ieee754Float64Cos) \
- V(Ieee754Float64Cosh) \
- V(Ieee754Float64Exp) \
- V(Ieee754Float64Expm1) \
- V(Ieee754Float64Log) \
- V(Ieee754Float64Log1p) \
- V(Ieee754Float64Log10) \
- V(Ieee754Float64Log2) \
- V(Ieee754Float64Pow) \
- V(Ieee754Float64Sin) \
- V(Ieee754Float64Sinh) \
- V(Ieee754Float64Tan) \
+#define COMMON_ARCH_OPCODE_LIST(V) \
+ /* Tail call opcodes are grouped together to make IsTailCall fast */ \
+ /* and Arch call opcodes are grouped together to make */ \
+ /* IsCallWithDescriptorFlags fast */ \
+ V(ArchTailCallCodeObjectFromJSFunction) \
+ V(ArchTailCallCodeObject) \
+ V(ArchTailCallAddress) \
+ V(ArchTailCallWasm) \
+ /* Update IsTailCall if further TailCall opcodes are added */ \
+ \
+ V(ArchCallCodeObject) \
+ V(ArchCallJSFunction) \
+ V(ArchCallWasmFunction) \
+ V(ArchCallBuiltinPointer) \
+ /* Update IsCallWithDescriptorFlags if further Call opcodes are added */ \
+ \
+ V(ArchPrepareCallCFunction) \
+ V(ArchSaveCallerRegisters) \
+ V(ArchRestoreCallerRegisters) \
+ V(ArchCallCFunction) \
+ V(ArchPrepareTailCall) \
+ V(ArchJmp) \
+ V(ArchBinarySearchSwitch) \
+ V(ArchTableSwitch) \
+ V(ArchNop) \
+ V(ArchAbortCSAAssert) \
+ V(ArchDebugBreak) \
+ V(ArchComment) \
+ V(ArchThrowTerminator) \
+ V(ArchDeoptimize) \
+ V(ArchRet) \
+ V(ArchFramePointer) \
+ V(ArchParentFramePointer) \
+ V(ArchTruncateDoubleToI) \
+ V(ArchStoreWithWriteBarrier) \
+ V(ArchStackSlot) \
+ V(ArchWordPoisonOnSpeculation) \
+ V(ArchStackPointerGreaterThan) \
+ V(ArchStackCheckOffset) \
+ V(Word32AtomicLoadInt8) \
+ V(Word32AtomicLoadUint8) \
+ V(Word32AtomicLoadInt16) \
+ V(Word32AtomicLoadUint16) \
+ V(Word32AtomicLoadWord32) \
+ V(Word32AtomicStoreWord8) \
+ V(Word32AtomicStoreWord16) \
+ V(Word32AtomicStoreWord32) \
+ V(Word32AtomicExchangeInt8) \
+ V(Word32AtomicExchangeUint8) \
+ V(Word32AtomicExchangeInt16) \
+ V(Word32AtomicExchangeUint16) \
+ V(Word32AtomicExchangeWord32) \
+ V(Word32AtomicCompareExchangeInt8) \
+ V(Word32AtomicCompareExchangeUint8) \
+ V(Word32AtomicCompareExchangeInt16) \
+ V(Word32AtomicCompareExchangeUint16) \
+ V(Word32AtomicCompareExchangeWord32) \
+ V(Word32AtomicAddInt8) \
+ V(Word32AtomicAddUint8) \
+ V(Word32AtomicAddInt16) \
+ V(Word32AtomicAddUint16) \
+ V(Word32AtomicAddWord32) \
+ V(Word32AtomicSubInt8) \
+ V(Word32AtomicSubUint8) \
+ V(Word32AtomicSubInt16) \
+ V(Word32AtomicSubUint16) \
+ V(Word32AtomicSubWord32) \
+ V(Word32AtomicAndInt8) \
+ V(Word32AtomicAndUint8) \
+ V(Word32AtomicAndInt16) \
+ V(Word32AtomicAndUint16) \
+ V(Word32AtomicAndWord32) \
+ V(Word32AtomicOrInt8) \
+ V(Word32AtomicOrUint8) \
+ V(Word32AtomicOrInt16) \
+ V(Word32AtomicOrUint16) \
+ V(Word32AtomicOrWord32) \
+ V(Word32AtomicXorInt8) \
+ V(Word32AtomicXorUint8) \
+ V(Word32AtomicXorInt16) \
+ V(Word32AtomicXorUint16) \
+ V(Word32AtomicXorWord32) \
+ V(Ieee754Float64Acos) \
+ V(Ieee754Float64Acosh) \
+ V(Ieee754Float64Asin) \
+ V(Ieee754Float64Asinh) \
+ V(Ieee754Float64Atan) \
+ V(Ieee754Float64Atanh) \
+ V(Ieee754Float64Atan2) \
+ V(Ieee754Float64Cbrt) \
+ V(Ieee754Float64Cos) \
+ V(Ieee754Float64Cosh) \
+ V(Ieee754Float64Exp) \
+ V(Ieee754Float64Expm1) \
+ V(Ieee754Float64Log) \
+ V(Ieee754Float64Log1p) \
+ V(Ieee754Float64Log10) \
+ V(Ieee754Float64Log2) \
+ V(Ieee754Float64Pow) \
+ V(Ieee754Float64Sin) \
+ V(Ieee754Float64Sinh) \
+ V(Ieee754Float64Tan) \
V(Ieee754Float64Tanh)
#define ARCH_OPCODE_LIST(V) \
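Keeping the tail-call and call opcodes grouped at the front of this list is what lets Instruction::IsTailCall and the new Instruction::IsCallWithDescriptorFlags (in instruction.h further below) be single range checks. A self-contained sketch of that trick with a made-up enum, not V8's:

// Hypothetical opcodes; a predicate over a contiguous prefix of enumerators
// reduces to one comparison, which is why the list comments insist the
// predicates be updated whenever the groups grow.
enum class Opcode {
  kTailCallA,
  kTailCallB,  // last tail-call opcode
  kCallA,
  kCallB,      // last call opcode carrying descriptor flags
  kJmp,
  kNop,
};

constexpr bool IsTailCall(Opcode op) { return op <= Opcode::kTailCallB; }
constexpr bool IsCallWithDescriptorFlags(Opcode op) {
  return op <= Opcode::kCallB;
}

static_assert(IsCallWithDescriptorFlags(Opcode::kTailCallA), "prefix holds");
static_assert(!IsCallWithDescriptorFlags(Opcode::kJmp), "non-calls excluded");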
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 1c14832bbf..b62cc83532 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -11,6 +11,7 @@
#include "src/codegen/tick-counter.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
@@ -28,9 +29,9 @@ InstructionSelector::InstructionSelector(
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
- size_t* max_unoptimized_frame_height, size_t* max_pushed_argument_count,
- SourcePositionMode source_position_mode, Features features,
- EnableScheduling enable_scheduling,
+ JSHeapBroker* broker, size_t* max_unoptimized_frame_height,
+ size_t* max_pushed_argument_count, SourcePositionMode source_position_mode,
+ Features features, EnableScheduling enable_scheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing,
PoisoningMitigationLevel poisoning_level, EnableTraceTurboJson trace_turbo)
: zone_(zone),
@@ -61,6 +62,7 @@ InstructionSelector::InstructionSelector(
instr_origins_(sequence->zone()),
trace_turbo_(trace_turbo),
tick_counter_(tick_counter),
+ broker_(broker),
max_unoptimized_frame_height_(max_unoptimized_frame_height),
max_pushed_argument_count_(max_pushed_argument_count)
#if V8_TARGET_ARCH_64_BIT
@@ -604,9 +606,8 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
values->PushArgumentsLength();
return 0;
}
- case IrOpcode::kObjectState: {
+ case IrOpcode::kObjectState:
UNREACHABLE();
- }
case IrOpcode::kTypedObjectState:
case IrOpcode::kObjectId: {
size_t id = deduplicator->GetObjectId(input);
@@ -1129,6 +1130,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
node->opcode() == IrOpcode::kCall ||
node->opcode() == IrOpcode::kProtectedLoad ||
node->opcode() == IrOpcode::kProtectedStore ||
+ node->opcode() == IrOpcode::kLoadTransform ||
#define ADD_EFFECT_FOR_ATOMIC_OP(Opcode) \
node->opcode() == IrOpcode::k##Opcode ||
MACHINE_ATOMIC_OP_LIST(ADD_EFFECT_FOR_ATOMIC_OP)
@@ -1330,6 +1332,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kFinishRegion:
return MarkAsTagged(node), VisitFinishRegion(node);
case IrOpcode::kParameter: {
+ // Parameters should always be scheduled to the first block.
+ DCHECK_EQ(schedule()->block(node)->rpo_number(), 0);
MachineType type =
linkage()->GetParameterType(ParameterIndexOf(node->op()));
MarkAsRepresentation(type.representation(), node);
@@ -1411,6 +1415,10 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitLoadTransform(node);
}
+ case IrOpcode::kLoadLane: {
+ MarkAsRepresentation(MachineRepresentation::kSimd128, node);
+ return VisitLoadLane(node);
+ }
case IrOpcode::kPoisonedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
@@ -1420,6 +1428,10 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitStore(node);
case IrOpcode::kProtectedStore:
return VisitProtectedStore(node);
+ case IrOpcode::kStoreLane: {
+ MarkAsRepresentation(MachineRepresentation::kSimd128, node);
+ return VisitStoreLane(node);
+ }
case IrOpcode::kWord32And:
return MarkAsWord32(node), VisitWord32And(node);
case IrOpcode::kWord32Or:
@@ -1981,6 +1993,16 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI64x2ReplaceLaneI32Pair(node);
case IrOpcode::kI64x2Neg:
return MarkAsSimd128(node), VisitI64x2Neg(node);
+ case IrOpcode::kI64x2SConvertI32x4Low:
+ return MarkAsSimd128(node), VisitI64x2SConvertI32x4Low(node);
+ case IrOpcode::kI64x2SConvertI32x4High:
+ return MarkAsSimd128(node), VisitI64x2SConvertI32x4High(node);
+ case IrOpcode::kI64x2UConvertI32x4Low:
+ return MarkAsSimd128(node), VisitI64x2UConvertI32x4Low(node);
+ case IrOpcode::kI64x2UConvertI32x4High:
+ return MarkAsSimd128(node), VisitI64x2UConvertI32x4High(node);
+ case IrOpcode::kI64x2BitMask:
+ return MarkAsWord32(node), VisitI64x2BitMask(node);
case IrOpcode::kI64x2Shl:
return MarkAsSimd128(node), VisitI64x2Shl(node);
case IrOpcode::kI64x2ShrS:
@@ -1991,28 +2013,20 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI64x2Sub(node);
case IrOpcode::kI64x2Mul:
return MarkAsSimd128(node), VisitI64x2Mul(node);
- case IrOpcode::kI64x2MinS:
- return MarkAsSimd128(node), VisitI64x2MinS(node);
- case IrOpcode::kI64x2MaxS:
- return MarkAsSimd128(node), VisitI64x2MaxS(node);
case IrOpcode::kI64x2Eq:
return MarkAsSimd128(node), VisitI64x2Eq(node);
- case IrOpcode::kI64x2Ne:
- return MarkAsSimd128(node), VisitI64x2Ne(node);
- case IrOpcode::kI64x2GtS:
- return MarkAsSimd128(node), VisitI64x2GtS(node);
- case IrOpcode::kI64x2GeS:
- return MarkAsSimd128(node), VisitI64x2GeS(node);
case IrOpcode::kI64x2ShrU:
return MarkAsSimd128(node), VisitI64x2ShrU(node);
- case IrOpcode::kI64x2MinU:
- return MarkAsSimd128(node), VisitI64x2MinU(node);
- case IrOpcode::kI64x2MaxU:
- return MarkAsSimd128(node), VisitI64x2MaxU(node);
- case IrOpcode::kI64x2GtU:
- return MarkAsSimd128(node), VisitI64x2GtU(node);
- case IrOpcode::kI64x2GeU:
- return MarkAsSimd128(node), VisitI64x2GeU(node);
+ case IrOpcode::kI64x2ExtMulLowI32x4S:
+ return MarkAsSimd128(node), VisitI64x2ExtMulLowI32x4S(node);
+ case IrOpcode::kI64x2ExtMulHighI32x4S:
+ return MarkAsSimd128(node), VisitI64x2ExtMulHighI32x4S(node);
+ case IrOpcode::kI64x2ExtMulLowI32x4U:
+ return MarkAsSimd128(node), VisitI64x2ExtMulLowI32x4U(node);
+ case IrOpcode::kI64x2ExtMulHighI32x4U:
+ return MarkAsSimd128(node), VisitI64x2ExtMulHighI32x4U(node);
+ case IrOpcode::kI64x2SignSelect:
+ return MarkAsSimd128(node), VisitI64x2SignSelect(node);
case IrOpcode::kI32x4Splat:
return MarkAsSimd128(node), VisitI32x4Splat(node);
case IrOpcode::kI32x4ExtractLane:
@@ -2073,6 +2087,20 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitI32x4BitMask(node);
case IrOpcode::kI32x4DotI16x8S:
return MarkAsSimd128(node), VisitI32x4DotI16x8S(node);
+ case IrOpcode::kI32x4ExtMulLowI16x8S:
+ return MarkAsSimd128(node), VisitI32x4ExtMulLowI16x8S(node);
+ case IrOpcode::kI32x4ExtMulHighI16x8S:
+ return MarkAsSimd128(node), VisitI32x4ExtMulHighI16x8S(node);
+ case IrOpcode::kI32x4ExtMulLowI16x8U:
+ return MarkAsSimd128(node), VisitI32x4ExtMulLowI16x8U(node);
+ case IrOpcode::kI32x4ExtMulHighI16x8U:
+ return MarkAsSimd128(node), VisitI32x4ExtMulHighI16x8U(node);
+ case IrOpcode::kI32x4SignSelect:
+ return MarkAsSimd128(node), VisitI32x4SignSelect(node);
+ case IrOpcode::kI32x4ExtAddPairwiseI16x8S:
+ return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8S(node);
+ case IrOpcode::kI32x4ExtAddPairwiseI16x8U:
+ return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8U(node);
case IrOpcode::kI16x8Splat:
return MarkAsSimd128(node), VisitI16x8Splat(node);
case IrOpcode::kI16x8ExtractLaneU:
@@ -2095,14 +2123,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI16x8SConvertI32x4(node);
case IrOpcode::kI16x8Add:
return MarkAsSimd128(node), VisitI16x8Add(node);
- case IrOpcode::kI16x8AddSaturateS:
- return MarkAsSimd128(node), VisitI16x8AddSaturateS(node);
+ case IrOpcode::kI16x8AddSatS:
+ return MarkAsSimd128(node), VisitI16x8AddSatS(node);
case IrOpcode::kI16x8AddHoriz:
return MarkAsSimd128(node), VisitI16x8AddHoriz(node);
case IrOpcode::kI16x8Sub:
return MarkAsSimd128(node), VisitI16x8Sub(node);
- case IrOpcode::kI16x8SubSaturateS:
- return MarkAsSimd128(node), VisitI16x8SubSaturateS(node);
+ case IrOpcode::kI16x8SubSatS:
+ return MarkAsSimd128(node), VisitI16x8SubSatS(node);
case IrOpcode::kI16x8Mul:
return MarkAsSimd128(node), VisitI16x8Mul(node);
case IrOpcode::kI16x8MinS:
@@ -2125,10 +2153,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI16x8ShrU(node);
case IrOpcode::kI16x8UConvertI32x4:
return MarkAsSimd128(node), VisitI16x8UConvertI32x4(node);
- case IrOpcode::kI16x8AddSaturateU:
- return MarkAsSimd128(node), VisitI16x8AddSaturateU(node);
- case IrOpcode::kI16x8SubSaturateU:
- return MarkAsSimd128(node), VisitI16x8SubSaturateU(node);
+ case IrOpcode::kI16x8AddSatU:
+ return MarkAsSimd128(node), VisitI16x8AddSatU(node);
+ case IrOpcode::kI16x8SubSatU:
+ return MarkAsSimd128(node), VisitI16x8SubSatU(node);
case IrOpcode::kI16x8MinU:
return MarkAsSimd128(node), VisitI16x8MinU(node);
case IrOpcode::kI16x8MaxU:
@@ -2139,10 +2167,26 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI16x8GeU(node);
case IrOpcode::kI16x8RoundingAverageU:
return MarkAsSimd128(node), VisitI16x8RoundingAverageU(node);
+ case IrOpcode::kI16x8Q15MulRSatS:
+ return MarkAsSimd128(node), VisitI16x8Q15MulRSatS(node);
case IrOpcode::kI16x8Abs:
return MarkAsSimd128(node), VisitI16x8Abs(node);
case IrOpcode::kI16x8BitMask:
return MarkAsWord32(node), VisitI16x8BitMask(node);
+ case IrOpcode::kI16x8ExtMulLowI8x16S:
+ return MarkAsSimd128(node), VisitI16x8ExtMulLowI8x16S(node);
+ case IrOpcode::kI16x8ExtMulHighI8x16S:
+ return MarkAsSimd128(node), VisitI16x8ExtMulHighI8x16S(node);
+ case IrOpcode::kI16x8ExtMulLowI8x16U:
+ return MarkAsSimd128(node), VisitI16x8ExtMulLowI8x16U(node);
+ case IrOpcode::kI16x8ExtMulHighI8x16U:
+ return MarkAsSimd128(node), VisitI16x8ExtMulHighI8x16U(node);
+ case IrOpcode::kI16x8SignSelect:
+ return MarkAsSimd128(node), VisitI16x8SignSelect(node);
+ case IrOpcode::kI16x8ExtAddPairwiseI8x16S:
+ return MarkAsSimd128(node), VisitI16x8ExtAddPairwiseI8x16S(node);
+ case IrOpcode::kI16x8ExtAddPairwiseI8x16U:
+ return MarkAsSimd128(node), VisitI16x8ExtAddPairwiseI8x16U(node);
case IrOpcode::kI8x16Splat:
return MarkAsSimd128(node), VisitI8x16Splat(node);
case IrOpcode::kI8x16ExtractLaneU:
@@ -2161,12 +2205,12 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16SConvertI16x8(node);
case IrOpcode::kI8x16Add:
return MarkAsSimd128(node), VisitI8x16Add(node);
- case IrOpcode::kI8x16AddSaturateS:
- return MarkAsSimd128(node), VisitI8x16AddSaturateS(node);
+ case IrOpcode::kI8x16AddSatS:
+ return MarkAsSimd128(node), VisitI8x16AddSatS(node);
case IrOpcode::kI8x16Sub:
return MarkAsSimd128(node), VisitI8x16Sub(node);
- case IrOpcode::kI8x16SubSaturateS:
- return MarkAsSimd128(node), VisitI8x16SubSaturateS(node);
+ case IrOpcode::kI8x16SubSatS:
+ return MarkAsSimd128(node), VisitI8x16SubSatS(node);
case IrOpcode::kI8x16Mul:
return MarkAsSimd128(node), VisitI8x16Mul(node);
case IrOpcode::kI8x16MinS:
@@ -2185,10 +2229,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16ShrU(node);
case IrOpcode::kI8x16UConvertI16x8:
return MarkAsSimd128(node), VisitI8x16UConvertI16x8(node);
- case IrOpcode::kI8x16AddSaturateU:
- return MarkAsSimd128(node), VisitI8x16AddSaturateU(node);
- case IrOpcode::kI8x16SubSaturateU:
- return MarkAsSimd128(node), VisitI8x16SubSaturateU(node);
+ case IrOpcode::kI8x16AddSatU:
+ return MarkAsSimd128(node), VisitI8x16AddSatU(node);
+ case IrOpcode::kI8x16SubSatU:
+ return MarkAsSimd128(node), VisitI8x16SubSatU(node);
case IrOpcode::kI8x16MinU:
return MarkAsSimd128(node), VisitI8x16MinU(node);
case IrOpcode::kI8x16MaxU:
@@ -2199,10 +2243,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16GeU(node);
case IrOpcode::kI8x16RoundingAverageU:
return MarkAsSimd128(node), VisitI8x16RoundingAverageU(node);
+ case IrOpcode::kI8x16Popcnt:
+ return MarkAsSimd128(node), VisitI8x16Popcnt(node);
case IrOpcode::kI8x16Abs:
return MarkAsSimd128(node), VisitI8x16Abs(node);
case IrOpcode::kI8x16BitMask:
return MarkAsWord32(node), VisitI8x16BitMask(node);
+ case IrOpcode::kI8x16SignSelect:
+ return MarkAsSimd128(node), VisitI8x16SignSelect(node);
case IrOpcode::kS128Const:
return MarkAsSimd128(node), VisitS128Const(node);
case IrOpcode::kS128Zero:
@@ -2223,10 +2271,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16Swizzle(node);
case IrOpcode::kI8x16Shuffle:
return MarkAsSimd128(node), VisitI8x16Shuffle(node);
- case IrOpcode::kV64x2AnyTrue:
- return MarkAsWord32(node), VisitV64x2AnyTrue(node);
- case IrOpcode::kV64x2AllTrue:
- return MarkAsWord32(node), VisitV64x2AllTrue(node);
case IrOpcode::kV32x4AnyTrue:
return MarkAsWord32(node), VisitV32x4AnyTrue(node);
case IrOpcode::kV32x4AllTrue:
@@ -2668,30 +2712,104 @@ void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2GeS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitV64x2AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitV64x2AllTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM64
-void InstructionSelector::VisitI64x2MinS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2MaxS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2MinU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2MaxU(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
- !V8_TARGET_ARCH_ARM
-// TODO(v8:10583) Prototype i32x4.dot_i16x8_s
-void InstructionSelector::VisitI32x4DotI16x8S(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
- // && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM64
+// TODO(v8:10971) Prototype i16x8.q15mulr_sat_s
+void InstructionSelector::VisitI16x8Q15MulRSatS(Node* node) { UNIMPLEMENTED(); }
+
+// TODO(v8:10972) Prototype i64x2 widen i32x4.
+void InstructionSelector::VisitI64x2SConvertI32x4Low(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI64x2SConvertI32x4High(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI64x2UConvertI32x4Low(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI64x2UConvertI32x4High(Node* node) {
+ UNIMPLEMENTED();
+}
+
+// TODO(v8:11002) Prototype i8x16.popcnt.
+void InstructionSelector::VisitI8x16Popcnt(Node* node) { UNIMPLEMENTED(); }
+
+// TODO(v8:11008) Prototype extended multiplication.
+void InstructionSelector::VisitI64x2ExtMulLowI32x4S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI64x2ExtMulHighI32x4S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI64x2ExtMulLowI32x4U(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI64x2ExtMulHighI32x4U(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI32x4ExtMulLowI16x8S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI32x4ExtMulHighI16x8S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI32x4ExtMulLowI16x8U(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI32x4ExtMulHighI16x8U(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI16x8ExtMulLowI8x16S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI16x8ExtMulHighI8x16S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI16x8ExtMulLowI8x16U(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI16x8ExtMulHighI8x16U(Node* node) {
+ UNIMPLEMENTED();
+}
+
+// TODO(v8:11086) Prototype extended pairwise add.
+void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8U(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
+ UNIMPLEMENTED();
+}
+#endif // !V8_TARGET_ARCH_ARM64
+
+#if !V8_TARGET_ARCH_X64
+// TODO(v8:10975): Prototyping load lane and store lane.
+void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
+
+// TODO(v8:10997) Prototype i64x2.bitmask.
+void InstructionSelector::VisitI64x2BitMask(Node* node) { UNIMPLEMENTED(); }
+
+// TODO(v8:10983) Prototyping sign select.
+void InstructionSelector::VisitI8x16SignSelect(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8SignSelect(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4SignSelect(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2SignSelect(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
@@ -2720,6 +2838,7 @@ constexpr InstructionCode EncodeCallDescriptorFlags(
// Note: Not all bits of `flags` are preserved.
STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode ==
MiscField::kSize);
+ CONSTEXPR_DCHECK(Instruction::IsCallWithDescriptorFlags(opcode));
return opcode | MiscField::encode(flags & MiscField::kMax);
}
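EncodeCallDescriptorFlags above packs the low call-descriptor flag bits into the opcode word's MiscField. A minimal stand-alone version of that packing scheme; the field widths are assumptions for illustration, not V8's actual layout:

#include <cstdint>

// Assumed layout: low 9 bits hold the arch opcode, the next 12 bits form a
// "misc" field that can carry descriptor flags.
constexpr uint32_t kOpcodeBits = 9;
constexpr uint32_t kMiscBits = 12;
constexpr uint32_t kMiscMax = (1u << kMiscBits) - 1;

constexpr uint32_t EncodeMisc(uint32_t value) {
  return (value & kMiscMax) << kOpcodeBits;
}

constexpr uint32_t DecodeMisc(uint32_t code) {
  return (code >> kOpcodeBits) & kMiscMax;
}

// Mirrors "opcode | MiscField::encode(flags & MiscField::kMax)": only the low
// kMiscBits of the flags survive.
constexpr uint32_t WithFlags(uint32_t opcode, uint32_t flags) {
  return opcode | EncodeMisc(flags);
}

static_assert(DecodeMisc(WithFlags(7, 0x5)) == 0x5, "flags round-trip");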
@@ -2838,7 +2957,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
}
// Select the appropriate opcode based on the call type.
- InstructionCode opcode = kArchNop;
+ InstructionCode opcode;
switch (call_descriptor->kind()) {
case CallDescriptor::kCallAddress: {
int misc_field = static_cast<int>(call_descriptor->ParameterCount());
@@ -2921,7 +3040,6 @@ void InstructionSelector::VisitTailCall(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
int temps_count = GetTempsCountForTailCallFromJSFunction();
for (int i = 0; i < temps_count; i++) {
@@ -2940,7 +3058,6 @@ void InstructionSelector::VisitTailCall(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
}
opcode = EncodeCallDescriptorFlags(opcode, call_descriptor->flags());
@@ -3072,6 +3189,7 @@ void InstructionSelector::VisitUnreachable(Node* node) {
void InstructionSelector::VisitStaticAssert(Node* node) {
Node* asserted = node->InputAt(0);
+ UnparkedScopeIfNeeded scope(broker_);
AllowHandleDereference allow_handle_dereference;
asserted->Print(4);
FATAL(
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 6452e3ec4c..fc16814d45 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -272,7 +272,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
- size_t* max_unoptimized_frame_height, size_t* max_pushed_argument_count,
+ JSHeapBroker* broker, size_t* max_unoptimized_frame_height,
+ size_t* max_pushed_argument_count,
SourcePositionMode source_position_mode = kCallSourcePositions,
Features features = SupportedFeatures(),
EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
@@ -708,6 +709,9 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
ZoneVector<std::pair<int, int>> instr_origins_;
EnableTraceTurboJson trace_turbo_;
TickCounter* const tick_counter_;
+ // The broker is only used for unparking the LocalHeap for diagnostic printing
+ // for failed StaticAsserts.
+ JSHeapBroker* const broker_;
// Store the maximal unoptimized frame height and the maximal number of pushed
// arguments (for calls). Later used to apply an offset to stack checks.
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 0419928792..55fce0aeeb 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -705,6 +705,9 @@ class V8_EXPORT_PRIVATE MoveOperands final
DCHECK(!source.IsInvalid() && !destination.IsInvalid());
}
+ MoveOperands(const MoveOperands&) = delete;
+ MoveOperands& operator=(const MoveOperands&) = delete;
+
const InstructionOperand& source() const { return source_; }
InstructionOperand& source() { return source_; }
void set_source(const InstructionOperand& operand) { source_ = operand; }
@@ -742,8 +745,6 @@ class V8_EXPORT_PRIVATE MoveOperands final
private:
InstructionOperand source_;
InstructionOperand destination_;
-
- DISALLOW_COPY_AND_ASSIGN(MoveOperands);
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, const MoveOperands&);
@@ -753,6 +754,8 @@ class V8_EXPORT_PRIVATE ParallelMove final
public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit ParallelMove(Zone* zone) : ZoneVector<MoveOperands*>(zone) {}
+ ParallelMove(const ParallelMove&) = delete;
+ ParallelMove& operator=(const ParallelMove&) = delete;
MoveOperands* AddMove(const InstructionOperand& from,
const InstructionOperand& to) {
@@ -777,9 +780,6 @@ class V8_EXPORT_PRIVATE ParallelMove final
// to_eliminate must be Eliminated.
void PrepareInsertAfter(MoveOperands* move,
ZoneVector<MoveOperands*>* to_eliminate) const;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ParallelMove);
};
std::ostream& operator<<(std::ostream&, const ParallelMove&);
@@ -814,6 +814,9 @@ class InstructionBlock;
class V8_EXPORT_PRIVATE Instruction final {
public:
+ Instruction(const Instruction&) = delete;
+ Instruction& operator=(const Instruction&) = delete;
+
size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
const InstructionOperand* OutputAt(size_t i) const {
DCHECK_LT(i, OutputCount());
@@ -927,6 +930,23 @@ class V8_EXPORT_PRIVATE Instruction final {
return arch_opcode() == ArchOpcode::kArchThrowTerminator;
}
+ static constexpr bool IsCallWithDescriptorFlags(InstructionCode arch_opcode) {
+ return arch_opcode <= ArchOpcode::kArchCallBuiltinPointer;
+ }
+ bool IsCallWithDescriptorFlags() const {
+ return IsCallWithDescriptorFlags(arch_opcode());
+ }
+ bool HasCallDescriptorFlag(CallDescriptor::Flag flag) const {
+ DCHECK(IsCallWithDescriptorFlags());
+ STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode == 10);
+#ifdef DEBUG
+ static constexpr int kInstructionCodeFlagsMask =
+ ((1 << CallDescriptor::kFlagsBitsEncodedInInstructionCode) - 1);
+ DCHECK_EQ(static_cast<int>(flag) & kInstructionCodeFlagsMask, flag);
+#endif
+ return MiscField::decode(opcode()) & flag;
+ }
+
enum GapPosition {
START,
END,
@@ -990,8 +1010,6 @@ class V8_EXPORT_PRIVATE Instruction final {
ReferenceMap* reference_map_;
InstructionBlock* block_;
InstructionOperand operands_[1];
-
- DISALLOW_COPY_AND_ASSIGN(Instruction);
};
std::ostream& operator<<(std::ostream&, const Instruction&);
@@ -1514,6 +1532,8 @@ class V8_EXPORT_PRIVATE InstructionSequence final
const Schedule* schedule);
InstructionSequence(Isolate* isolate, Zone* zone,
InstructionBlocks* instruction_blocks);
+ InstructionSequence(const InstructionSequence&) = delete;
+ InstructionSequence& operator=(const InstructionSequence&) = delete;
int NextVirtualRegister();
int VirtualRegisterCount() const { return next_virtual_register_; }
@@ -1696,8 +1716,6 @@ class V8_EXPORT_PRIVATE InstructionSequence final
// Used at construction time
InstructionBlock* current_block_;
-
- DISALLOW_COPY_AND_ASSIGN(InstructionSequence);
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
diff --git a/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc b/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
index e033799cb9..43808526a8 100644
--- a/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
+++ b/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
@@ -23,6 +23,7 @@ namespace internal {
namespace compiler {
class RegisterState;
+class DeferredBlocksRegion;
// BlockState stores details associated with a particular basic block.
class BlockState final {
@@ -30,8 +31,10 @@ class BlockState final {
BlockState(int block_count, Zone* zone)
: general_registers_in_state_(nullptr),
double_registers_in_state_(nullptr),
+ deferred_blocks_region_(nullptr),
dominated_blocks_(block_count, zone),
- successors_phi_index_(-1) {}
+ successors_phi_index_(-1),
+ is_deferred_block_boundary_(false) {}
// Returns the RegisterState that applies to the input of this block. Can be
// |nullptr| if no registers of |kind| have been allocated up to this
@@ -51,14 +54,34 @@ class BlockState final {
successors_phi_index_ = index;
}
+ // If this block is deferred, this represents the region of deferred blocks
+ // that are directly reachable from this block.
+ DeferredBlocksRegion* deferred_blocks_region() const {
+ return deferred_blocks_region_;
+ }
+ void set_deferred_blocks_region(DeferredBlocksRegion* region) {
+ DCHECK_NULL(deferred_blocks_region_);
+ deferred_blocks_region_ = region;
+ }
+
+ // Returns true if this block represents either a transition from
+ // non-deferred to deferred or vice versa.
+ bool is_deferred_block_boundary() const {
+ return is_deferred_block_boundary_;
+ }
+ void MarkAsDeferredBlockBoundary() { is_deferred_block_boundary_ = true; }
+
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(BlockState);
private:
RegisterState* general_registers_in_state_;
RegisterState* double_registers_in_state_;
+ DeferredBlocksRegion* deferred_blocks_region_;
+
BitVector dominated_blocks_;
int successors_phi_index_;
+ bool is_deferred_block_boundary_;
};
RegisterState* BlockState::register_in_state(RegisterKind kind) {
@@ -145,8 +168,7 @@ const InstructionBlock* MidTierRegisterAllocationData::GetBlock(
}
const BitVector* MidTierRegisterAllocationData::GetBlocksDominatedBy(
- int instr_index) {
- const InstructionBlock* block = GetBlock(instr_index);
+ const InstructionBlock* block) {
return block_state(block->rpo_number()).dominated_blocks();
}
@@ -225,6 +247,32 @@ class Range {
int end_;
};
+// Represents a connected region of deferred basic blocks.
+class DeferredBlocksRegion final {
+ public:
+ explicit DeferredBlocksRegion(Zone* zone, int number_of_blocks)
+ : spilled_vregs_(zone), blocks_covered_(number_of_blocks, zone) {}
+
+ void AddBlock(RpoNumber block, MidTierRegisterAllocationData* data) {
+ DCHECK(data->GetBlock(block)->IsDeferred());
+ blocks_covered_.Add(block.ToInt());
+ data->block_state(block).set_deferred_blocks_region(this);
+ }
+
+ // Adds |vreg| to the set of variables whose output may be deferred to a
+ // spill slot until we enter this deferred block region.
+ void DeferSpillOutputUntilEntry(int vreg) { spilled_vregs_.insert(vreg); }
+
+ ZoneSet<int>::iterator begin() const { return spilled_vregs_.begin(); }
+ ZoneSet<int>::iterator end() const { return spilled_vregs_.end(); }
+
+ const BitVector* blocks_covered() const { return &blocks_covered_; }
+
+ private:
+ ZoneSet<int> spilled_vregs_;
+ BitVector blocks_covered_;
+};
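
A DeferredBlocksRegion is essentially a block bitmap plus a set of virtual registers whose spills are deferred to region entry. A hedged sketch of the same bookkeeping with standard containers standing in for the zone-allocated BitVector and ZoneSet (names here are illustrative, not V8 API):

    #include <set>
    #include <vector>

    // Illustrative region bookkeeping: which blocks the region covers, and which
    // virtual registers should only be spilled when the region is entered.
    class DeferredRegionSketch {
     public:
      explicit DeferredRegionSketch(int number_of_blocks)
          : blocks_covered_(number_of_blocks, false) {}

      // Record that block |rpo| belongs to this connected deferred region.
      void AddBlock(int rpo) { blocks_covered_[rpo] = true; }

      // Defer spilling |vreg| until control enters this region.
      void DeferSpillOutputUntilEntry(int vreg) { spilled_vregs_.insert(vreg); }

      bool Covers(int rpo) const { return blocks_covered_[rpo]; }
      const std::set<int>& spilled_vregs() const { return spilled_vregs_; }

     private:
      std::set<int> spilled_vregs_;
      std::vector<bool> blocks_covered_;
    };

The output processor further down floods connected deferred blocks into one such region and later walks the deferred virtual registers when emitting entry spills.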
+
// VirtualRegisterData stores data specific to a particular virtual register,
// and tracks spilled operands for that virtual register.
class VirtualRegisterData final {
@@ -233,11 +281,17 @@ class VirtualRegisterData final {
// Define VirtualRegisterData with the type of output that produces this
// virtual register.
- void DefineAsUnallocatedOperand(int virtual_register, int instr_index);
+ void DefineAsUnallocatedOperand(int virtual_register, int instr_index,
+ bool is_deferred_block,
+ bool is_exceptional_call_output);
void DefineAsFixedSpillOperand(AllocatedOperand* operand,
- int virtual_register, int instr_index);
- void DefineAsConstantOperand(ConstantOperand* operand, int instr_index);
- void DefineAsPhi(int virtual_register, int instr_index);
+ int virtual_register, int instr_index,
+ bool is_deferred_block,
+ bool is_exceptional_call_output);
+ void DefineAsConstantOperand(ConstantOperand* operand, int instr_index,
+ bool is_deferred_block);
+ void DefineAsPhi(int virtual_register, int instr_index,
+ bool is_deferred_block);
// Spill an operand that is assigned to this virtual register.
void SpillOperand(InstructionOperand* operand, int instr_index,
@@ -254,6 +308,12 @@ class VirtualRegisterData final {
void EmitGapMoveToSpillSlot(AllocatedOperand from_operand, int instr_index,
MidTierRegisterAllocationData* data);
+ // Adds pending spills for deferred blocks.
+ void AddDeferredSpillUse(int instr_index,
+ MidTierRegisterAllocationData* data);
+ void AddDeferredSpillOutput(AllocatedOperand allocated_op, int instr_index,
+ MidTierRegisterAllocationData* data);
+
// Accessors for spill operand, which may still be pending allocation.
bool HasSpillOperand() const { return spill_operand_ != nullptr; }
InstructionOperand* spill_operand() const {
@@ -271,7 +331,29 @@ class VirtualRegisterData final {
DCHECK_EQ(is_constant(), HasSpillOperand() && spill_operand_->IsConstant());
return is_constant();
}
- bool NeedsSpillAtOutput() const;
+
+ // Returns true if the virtual register should be spilled when it is output.
+ bool NeedsSpillAtOutput() const { return needs_spill_at_output_; }
+ void MarkAsNeedsSpillAtOutput() {
+ if (is_constant()) return;
+ needs_spill_at_output_ = true;
+ if (HasSpillRange()) spill_range()->ClearDeferredBlockSpills();
+ }
+
+ // Returns true if the virtual register should be spilled at entry to deferred
+ // blocks in which it is spilled (to avoid spilling it at its output in
+ // non-deferred blocks).
+ bool NeedsSpillAtDeferredBlocks() const;
+ void EmitDeferredSpillOutputs(MidTierRegisterAllocationData* data);
+
+ bool IsSpilledAt(int instr_index, MidTierRegisterAllocationData* data) {
+ DCHECK_GE(instr_index, output_instr_index());
+ if (NeedsSpillAtOutput() || HasConstantSpillOperand()) return true;
+ if (HasSpillOperand() && data->GetBlock(instr_index)->IsDeferred()) {
+ return true;
+ }
+ return false;
+ }
// Allocates pending spill operands to the |allocated| spill slot.
void AllocatePendingSpillOperand(const AllocatedOperand& allocated);
@@ -279,26 +361,44 @@ class VirtualRegisterData final {
int vreg() const { return vreg_; }
int output_instr_index() const { return output_instr_index_; }
bool is_constant() const { return is_constant_; }
-
bool is_phi() const { return is_phi_; }
- void set_is_phi(bool value) { is_phi_ = value; }
+ bool is_defined_in_deferred_block() const {
+ return is_defined_in_deferred_block_;
+ }
+ bool is_exceptional_call_output() const {
+ return is_exceptional_call_output_;
+ }
+
+ struct DeferredSpillSlotOutput {
+ public:
+ explicit DeferredSpillSlotOutput(int instr, AllocatedOperand op,
+ const BitVector* blocks)
+ : instr_index(instr), operand(op), live_blocks(blocks) {}
+
+ int instr_index;
+ AllocatedOperand operand;
+ const BitVector* live_blocks;
+ };
// Represents the range of instructions for which this virtual register needs
// to be spilled on the stack.
class SpillRange : public ZoneObject {
public:
// Defines a spill range for an output operand.
- SpillRange(int definition_instr_index, MidTierRegisterAllocationData* data)
+ SpillRange(int definition_instr_index,
+ const InstructionBlock* definition_block,
+ MidTierRegisterAllocationData* data)
: live_range_(definition_instr_index, definition_instr_index),
- live_blocks_(data->GetBlocksDominatedBy(definition_instr_index)) {}
+ live_blocks_(data->GetBlocksDominatedBy(definition_block)),
+ deferred_spill_outputs_(nullptr) {}
// Defines a spill range for a Phi variable.
SpillRange(const InstructionBlock* phi_block,
MidTierRegisterAllocationData* data)
: live_range_(phi_block->first_instruction_index(),
phi_block->first_instruction_index()),
- live_blocks_(data->GetBlocksDominatedBy(
- phi_block->first_instruction_index())) {
+ live_blocks_(data->GetBlocksDominatedBy(phi_block)),
+ deferred_spill_outputs_(nullptr) {
// For phis, add the gap move instructions in the predecessor blocks to
// the live range.
for (RpoNumber pred_rpo : phi_block->predecessors()) {
@@ -307,20 +407,63 @@ class VirtualRegisterData final {
}
}
+ SpillRange(const SpillRange&) = delete;
+ SpillRange& operator=(const SpillRange&) = delete;
+
bool IsLiveAt(int instr_index, InstructionBlock* block) {
- return live_range_.Contains(instr_index) &&
- live_blocks_->Contains(block->rpo_number().ToInt());
+ if (!live_range_.Contains(instr_index)) return false;
+
+ int block_rpo = block->rpo_number().ToInt();
+ if (!live_blocks_->Contains(block_rpo)) return false;
+
+ if (!HasDeferredBlockSpills()) {
+ return true;
+ } else {
+ // If this spill range is only output for deferred blocks, then the spill
+ // slot will only be live for those deferred blocks, not for all blocks in
+ // which the virtual register is live.
+ for (auto deferred_spill_output : *deferred_spill_outputs()) {
+ if (deferred_spill_output.live_blocks->Contains(block_rpo)) {
+ return true;
+ }
+ }
+ return false;
+ }
}
void ExtendRangeTo(int instr_index) { live_range_.AddInstr(instr_index); }
+ void AddDeferredSpillOutput(AllocatedOperand allocated_op, int instr_index,
+ MidTierRegisterAllocationData* data) {
+ if (deferred_spill_outputs_ == nullptr) {
+ Zone* zone = data->allocation_zone();
+ deferred_spill_outputs_ =
+ zone->New<ZoneVector<DeferredSpillSlotOutput>>(zone);
+ }
+ const InstructionBlock* block = data->GetBlock(instr_index);
+ DCHECK_EQ(block->first_instruction_index(), instr_index);
+ BlockState& block_state = data->block_state(block->rpo_number());
+ const BitVector* deferred_blocks =
+ block_state.deferred_blocks_region()->blocks_covered();
+ deferred_spill_outputs_->emplace_back(instr_index, allocated_op,
+ deferred_blocks);
+ }
+
+ void ClearDeferredBlockSpills() { deferred_spill_outputs_ = nullptr; }
+ bool HasDeferredBlockSpills() const {
+ return deferred_spill_outputs_ != nullptr;
+ }
+ const ZoneVector<DeferredSpillSlotOutput>* deferred_spill_outputs() const {
+ DCHECK(HasDeferredBlockSpills());
+ return deferred_spill_outputs_;
+ }
+
Range& live_range() { return live_range_; }
private:
Range live_range_;
const BitVector* live_blocks_;
-
- DISALLOW_COPY_AND_ASSIGN(SpillRange);
+ ZoneVector<DeferredSpillSlotOutput>* deferred_spill_outputs_;
};
bool HasSpillRange() const { return spill_range_ != nullptr; }
@@ -331,11 +474,14 @@ class VirtualRegisterData final {
private:
void Initialize(int virtual_register, InstructionOperand* spill_operand,
- int instr_index, bool is_phi, bool is_constant);
+ int instr_index, bool is_phi, bool is_constant,
+ bool is_defined_in_deferred_block,
+ bool is_exceptional_call_output);
- void AddPendingSpillOperand(PendingOperand* pending_operand);
void AddSpillUse(int instr_index, MidTierRegisterAllocationData* data);
+ void AddPendingSpillOperand(PendingOperand* pending_operand);
void EnsureSpillRange(MidTierRegisterAllocationData* data);
+ bool CouldSpillOnEntryToDeferred(const InstructionBlock* block);
InstructionOperand* spill_operand_;
SpillRange* spill_range_;
@@ -344,6 +490,9 @@ class VirtualRegisterData final {
int vreg_;
bool is_phi_ : 1;
bool is_constant_ : 1;
+ bool is_defined_in_deferred_block_ : 1;
+ bool needs_spill_at_output_ : 1;
+ bool is_exceptional_call_output_ : 1;
};
VirtualRegisterData& MidTierRegisterAllocationData::VirtualRegisterDataFor(
@@ -356,33 +505,45 @@ VirtualRegisterData& MidTierRegisterAllocationData::VirtualRegisterDataFor(
void VirtualRegisterData::Initialize(int virtual_register,
InstructionOperand* spill_operand,
int instr_index, bool is_phi,
- bool is_constant) {
+ bool is_constant,
+ bool is_defined_in_deferred_block,
+ bool is_exceptional_call_output) {
vreg_ = virtual_register;
spill_operand_ = spill_operand;
spill_range_ = nullptr;
output_instr_index_ = instr_index;
is_phi_ = is_phi;
is_constant_ = is_constant;
+ is_defined_in_deferred_block_ = is_defined_in_deferred_block;
+ needs_spill_at_output_ = !is_constant_ && spill_operand_ != nullptr;
+ is_exceptional_call_output_ = is_exceptional_call_output;
}
void VirtualRegisterData::DefineAsConstantOperand(ConstantOperand* operand,
- int instr_index) {
- Initialize(operand->virtual_register(), operand, instr_index, false, true);
+ int instr_index,
+ bool is_deferred_block) {
+ Initialize(operand->virtual_register(), operand, instr_index, false, true,
+ is_deferred_block, false);
}
-void VirtualRegisterData::DefineAsFixedSpillOperand(AllocatedOperand* operand,
- int virtual_register,
- int instr_index) {
- Initialize(virtual_register, operand, instr_index, false, false);
+void VirtualRegisterData::DefineAsFixedSpillOperand(
+ AllocatedOperand* operand, int virtual_register, int instr_index,
+ bool is_deferred_block, bool is_exceptional_call_output) {
+ Initialize(virtual_register, operand, instr_index, false, false,
+ is_deferred_block, is_exceptional_call_output);
}
-void VirtualRegisterData::DefineAsUnallocatedOperand(int virtual_register,
- int instr_index) {
- Initialize(virtual_register, nullptr, instr_index, false, false);
+void VirtualRegisterData::DefineAsUnallocatedOperand(
+ int virtual_register, int instr_index, bool is_deferred_block,
+ bool is_exceptional_call_output) {
+ Initialize(virtual_register, nullptr, instr_index, false, false,
+ is_deferred_block, is_exceptional_call_output);
}
-void VirtualRegisterData::DefineAsPhi(int virtual_register, int instr_index) {
- Initialize(virtual_register, nullptr, instr_index, true, false);
+void VirtualRegisterData::DefineAsPhi(int virtual_register, int instr_index,
+ bool is_deferred_block) {
+ Initialize(virtual_register, nullptr, instr_index, true, false,
+ is_deferred_block, false);
}
void VirtualRegisterData::EnsureSpillRange(
@@ -390,16 +551,27 @@ void VirtualRegisterData::EnsureSpillRange(
DCHECK(!is_constant());
if (HasSpillRange()) return;
+ const InstructionBlock* definition_block =
+ data->GetBlock(output_instr_index_);
if (is_phi()) {
// Define a spill slot that is defined for the phi's range.
- const InstructionBlock* definition_block =
- data->code()->InstructionAt(output_instr_index_)->block();
spill_range_ =
data->allocation_zone()->New<SpillRange>(definition_block, data);
} else {
+ if (is_exceptional_call_output()) {
+ // If this virtual register is output by a call which has an exception
+ // catch handler, then the output will only be live in the IfSuccess
+ // successor block, not the IfException side, so make the definition block
+ // the IfSuccess successor block explicitly.
+ DCHECK_EQ(output_instr_index_,
+ definition_block->last_instruction_index() - 1);
+ DCHECK_EQ(definition_block->SuccessorCount(), 2);
+ DCHECK(data->GetBlock(definition_block->successors()[1])->IsHandler());
+ definition_block = data->GetBlock(definition_block->successors()[0]);
+ }
// The spill slot will be defined after the instruction that outputs it.
- spill_range_ =
- data->allocation_zone()->New<SpillRange>(output_instr_index_ + 1, data);
+ spill_range_ = data->allocation_zone()->New<SpillRange>(
+ output_instr_index_ + 1, definition_block, data);
}
data->spilled_virtual_registers().Add(vreg());
}
@@ -407,8 +579,38 @@ void VirtualRegisterData::EnsureSpillRange(
void VirtualRegisterData::AddSpillUse(int instr_index,
MidTierRegisterAllocationData* data) {
if (is_constant()) return;
+
EnsureSpillRange(data);
spill_range_->ExtendRangeTo(instr_index);
+
+ const InstructionBlock* block = data->GetBlock(instr_index);
+ if (CouldSpillOnEntryToDeferred(block)) {
+ data->block_state(block->rpo_number())
+ .deferred_blocks_region()
+ ->DeferSpillOutputUntilEntry(vreg());
+ } else {
+ MarkAsNeedsSpillAtOutput();
+ }
+}
+
+void VirtualRegisterData::AddDeferredSpillUse(
+ int instr_index, MidTierRegisterAllocationData* data) {
+ DCHECK(data->GetBlock(instr_index)->IsDeferred());
+ DCHECK(!is_defined_in_deferred_block());
+ AddSpillUse(instr_index, data);
+}
+
+bool VirtualRegisterData::CouldSpillOnEntryToDeferred(
+ const InstructionBlock* block) {
+ return !NeedsSpillAtOutput() && block->IsDeferred() &&
+ !is_defined_in_deferred_block() && !is_constant();
+}
+
+void VirtualRegisterData::AddDeferredSpillOutput(
+ AllocatedOperand allocated_op, int instr_index,
+ MidTierRegisterAllocationData* data) {
+ DCHECK(!NeedsSpillAtOutput());
+ spill_range_->AddDeferredSpillOutput(allocated_op, instr_index, data);
}
void VirtualRegisterData::SpillOperand(InstructionOperand* operand,
@@ -424,8 +626,17 @@ void VirtualRegisterData::SpillOperand(InstructionOperand* operand,
}
}
-bool VirtualRegisterData::NeedsSpillAtOutput() const {
- return HasSpillOperand() && !is_constant();
+bool VirtualRegisterData::NeedsSpillAtDeferredBlocks() const {
+ return HasSpillRange() && spill_range()->HasDeferredBlockSpills();
+}
+
+void VirtualRegisterData::EmitDeferredSpillOutputs(
+ MidTierRegisterAllocationData* data) {
+ DCHECK(NeedsSpillAtDeferredBlocks());
+ for (auto deferred_spill : *spill_range()->deferred_spill_outputs()) {
+ EmitGapMoveToSpillSlot(deferred_spill.operand, deferred_spill.instr_index,
+ data);
+ }
}
void VirtualRegisterData::EmitGapMoveToInputFromSpillSlot(
@@ -511,17 +722,32 @@ class RegisterState final : public ZoneObject {
RegisterState(const RegisterState& other) V8_NOEXCEPT;
bool IsAllocated(RegisterIndex reg);
+ bool IsShared(RegisterIndex reg);
int VirtualRegisterForRegister(RegisterIndex reg);
// Commit the |reg| with the |allocated| operand.
void Commit(RegisterIndex reg, AllocatedOperand allocated,
InstructionOperand* operand, MidTierRegisterAllocationData* data);
+
// Spill the contents of |reg| for an instruction in |current_block| using
// the |allocated| operand to commit the spill gap move.
void Spill(RegisterIndex reg, AllocatedOperand allocated,
const InstructionBlock* current_block,
MidTierRegisterAllocationData* data);
+ // Add a pending spill of the contents of |reg| at the exit point of a
+ // deferred block at |instr_index|, using the |allocated| operand to commit the
+ // spill gap move, if the register never gets spilled in a non-deferred block.
+ void SpillForDeferred(RegisterIndex reg, AllocatedOperand allocated,
+ int instr_index, MidTierRegisterAllocationData* data);
+
+ // Add a pending gap move from |reg| to |virtual_register|'s spill at the
+ // entry point of a deferred block at |instr_index|, if the |virtual_register|
+ // is never spilled in a non-deferred block.
+ void MoveToSpillSlotOnDeferred(RegisterIndex reg, int virtual_register,
+ int instr_index,
+ MidTierRegisterAllocationData* data);
+
// Allocate |reg| to |virtual_register| for the instruction at |instr_index|.
// If the register is later spilled, a gap move will be added immediately
// before |instr_index| to move |virtual_register| into this register.
@@ -583,18 +809,30 @@ class RegisterState final : public ZoneObject {
void Reset();
// Operations for committing, spilling and allocating uses of the register.
- void Commit(AllocatedOperand allocated_operand);
+ void Commit(AllocatedOperand allocated_operand,
+ MidTierRegisterAllocationData* data);
void Spill(AllocatedOperand allocated_op,
const InstructionBlock* current_block,
MidTierRegisterAllocationData* data);
void Use(int virtual_register, int instr_index);
void PendingUse(InstructionOperand* operand, int virtual_register,
int instr_index);
+ void SpillForDeferred(AllocatedOperand allocated, int instr_index,
+ MidTierRegisterAllocationData* data);
+ void MoveToSpillSlotOnDeferred(int virtual_register, int instr_index,
+ MidTierRegisterAllocationData* data);
// Mark register as holding a phi.
void MarkAsPhiMove();
bool is_phi_gap_move() const { return is_phi_gap_move_; }
+ // The register has deferred block spills that will be emitted if the
+ // register is committed without having been spilled in a non-deferred block.
+ void AddDeferredBlockSpill(int instr_index, bool on_exit, Zone* zone);
+ bool has_deferred_block_spills() const {
+ return deferred_block_spills_.has_value();
+ }
+
// Operations related to dealing with a Register that is shared across
// multiple basic blocks.
void CommitAtMerge();
@@ -627,6 +865,14 @@ class RegisterState final : public ZoneObject {
PendingOperand* pending_uses() const { return pending_uses_; }
private:
+ struct DeferredBlockSpill {
+ DeferredBlockSpill(int instr, bool on_exit)
+ : instr_index(instr), on_deferred_exit(on_exit) {}
+
+ int instr_index;
+ bool on_deferred_exit;
+ };
+
void SpillPendingUses(MidTierRegisterAllocationData* data);
void SpillPhiGapMove(AllocatedOperand allocated_op,
const InstructionBlock* block,
@@ -640,6 +886,7 @@ class RegisterState final : public ZoneObject {
int num_commits_required_;
int virtual_register_;
PendingOperand* pending_uses_;
+ base::Optional<ZoneVector<DeferredBlockSpill>> deferred_block_spills_;
};
void ResetDataFor(RegisterIndex reg);
@@ -667,6 +914,7 @@ void RegisterState::Register::Reset() {
num_commits_required_ = 0;
virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
pending_uses_ = nullptr;
+ deferred_block_spills_.reset();
}
void RegisterState::Register::Use(int virtual_register, int instr_index) {
@@ -689,7 +937,6 @@ void RegisterState::Register::PendingUse(InstructionOperand* operand,
num_commits_required_ = 1;
}
DCHECK_EQ(virtual_register_, virtual_register);
- DCHECK_GE(last_use_instr_index_, instr_index);
PendingOperand pending_op(pending_uses());
InstructionOperand::ReplaceWith(operand, &pending_op);
@@ -701,19 +948,31 @@ void RegisterState::Register::MarkAsPhiMove() {
is_phi_gap_move_ = true;
}
+void RegisterState::Register::AddDeferredBlockSpill(int instr_index,
+ bool on_exit, Zone* zone) {
+ DCHECK(is_allocated());
+ if (!deferred_block_spills_) {
+ deferred_block_spills_.emplace(zone);
+ }
+ deferred_block_spills_->emplace_back(instr_index, on_exit);
+}
+
void RegisterState::Register::AddSharedUses(int shared_use_count) {
is_shared_ = true;
num_commits_required_ += shared_use_count;
}
void RegisterState::Register::CommitAtMerge() {
+ DCHECK(is_shared());
+ DCHECK(is_allocated());
--num_commits_required_;
// We should still have commits required that will be resolved in the merge
// block.
DCHECK_GT(num_commits_required_, 0);
}
-void RegisterState::Register::Commit(AllocatedOperand allocated_op) {
+void RegisterState::Register::Commit(AllocatedOperand allocated_op,
+ MidTierRegisterAllocationData* data) {
DCHECK(is_allocated());
DCHECK_GT(num_commits_required_, 0);
@@ -728,6 +987,29 @@ void RegisterState::Register::Commit(AllocatedOperand allocated_op) {
pending_use = next;
}
pending_uses_ = nullptr;
+
+ VirtualRegisterData& vreg_data =
+ data->VirtualRegisterDataFor(virtual_register());
+
+ // If there are deferred block gap moves pending, emit them now that the
+ // register has been committed.
+ if (has_deferred_block_spills()) {
+ for (DeferredBlockSpill& spill : *deferred_block_spills_) {
+ if (spill.on_deferred_exit) {
+ vreg_data.EmitGapMoveToInputFromSpillSlot(allocated_op,
+ spill.instr_index, data);
+ } else if (!vreg_data.NeedsSpillAtOutput()) {
+ vreg_data.AddDeferredSpillOutput(allocated_op, spill.instr_index,
+ data);
+ }
+ }
+ }
+
+ // If this register was used as a phi gap move, then it being committed
+ // is the point at which we have output the Phi.
+ if (is_phi_gap_move() && vreg_data.NeedsSpillAtDeferredBlocks()) {
+ vreg_data.EmitDeferredSpillOutputs(data);
+ }
}
DCHECK_IMPLIES(num_commits_required_ > 0, is_shared());
}
@@ -735,16 +1017,19 @@ void RegisterState::Register::Commit(AllocatedOperand allocated_op) {
void RegisterState::Register::Spill(AllocatedOperand allocated_op,
const InstructionBlock* current_block,
MidTierRegisterAllocationData* data) {
+ VirtualRegisterData& vreg_data =
+ data->VirtualRegisterDataFor(virtual_register());
+ SpillPendingUses(data);
if (is_phi_gap_move()) {
SpillPhiGapMove(allocated_op, current_block, data);
}
if (needs_gap_move_on_spill()) {
- VirtualRegisterData& vreg_data =
- data->VirtualRegisterDataFor(virtual_register());
vreg_data.EmitGapMoveToInputFromSpillSlot(allocated_op,
last_use_instr_index(), data);
}
- SpillPendingUses(data);
+ if (has_deferred_block_spills() || !current_block->IsDeferred()) {
+ vreg_data.MarkAsNeedsSpillAtOutput();
+ }
virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
}
@@ -784,6 +1069,30 @@ void RegisterState::Register::SpillPendingUses(
pending_uses_ = nullptr;
}
+void RegisterState::Register::SpillForDeferred(
+ AllocatedOperand allocated, int instr_index,
+ MidTierRegisterAllocationData* data) {
+ DCHECK(is_allocated());
+ DCHECK(is_shared());
+ // Add a pending deferred spill, then commit the register (with the commit
+ // being fulfilled by the deferred spill if the register is fully committed).
+ data->VirtualRegisterDataFor(virtual_register())
+ .AddDeferredSpillUse(instr_index, data);
+ AddDeferredBlockSpill(instr_index, true, data->allocation_zone());
+ Commit(allocated, data);
+}
+
+void RegisterState::Register::MoveToSpillSlotOnDeferred(
+ int virtual_register, int instr_index,
+ MidTierRegisterAllocationData* data) {
+ if (!is_allocated()) {
+ virtual_register_ = virtual_register;
+ last_use_instr_index_ = instr_index;
+ num_commits_required_ = 1;
+ }
+ AddDeferredBlockSpill(instr_index, false, data->allocation_zone());
+}
+
RegisterState::RegisterState(RegisterKind kind, int num_allocatable_registers,
Zone* zone)
: register_data_(num_allocatable_registers, zone), zone_(zone) {}
@@ -802,7 +1111,7 @@ int RegisterState::VirtualRegisterForRegister(RegisterIndex reg) {
}
bool RegisterState::IsPhiGapMove(RegisterIndex reg) {
- DCHECK(RegisterState::IsAllocated(reg));
+ DCHECK(IsAllocated(reg));
return reg_data(reg).is_phi_gap_move();
}
@@ -811,7 +1120,7 @@ void RegisterState::Commit(RegisterIndex reg, AllocatedOperand allocated,
MidTierRegisterAllocationData* data) {
InstructionOperand::ReplaceWith(operand, &allocated);
if (IsAllocated(reg)) {
- reg_data(reg).Commit(allocated);
+ reg_data(reg).Commit(allocated, data);
ResetDataFor(reg);
}
}
@@ -824,6 +1133,22 @@ void RegisterState::Spill(RegisterIndex reg, AllocatedOperand allocated,
ResetDataFor(reg);
}
+void RegisterState::SpillForDeferred(RegisterIndex reg,
+ AllocatedOperand allocated,
+ int instr_index,
+ MidTierRegisterAllocationData* data) {
+ DCHECK(IsAllocated(reg));
+ reg_data(reg).SpillForDeferred(allocated, instr_index, data);
+ ResetDataFor(reg);
+}
+
+void RegisterState::MoveToSpillSlotOnDeferred(
+ RegisterIndex reg, int virtual_register, int instr_index,
+ MidTierRegisterAllocationData* data) {
+ EnsureRegisterData(reg);
+ reg_data(reg).MoveToSpillSlotOnDeferred(virtual_register, instr_index, data);
+}
+
void RegisterState::AllocateUse(RegisterIndex reg, int virtual_register,
InstructionOperand* operand, int instr_index,
MidTierRegisterAllocationData* data) {
@@ -848,6 +1173,10 @@ RegisterState::Register& RegisterState::reg_data(RegisterIndex reg) {
return *register_data_[reg.ToInt()];
}
+bool RegisterState::IsShared(RegisterIndex reg) {
+ return HasRegisterData(reg) && reg_data(reg).is_shared();
+}
+
bool RegisterState::IsAllocated(RegisterIndex reg) {
return HasRegisterData(reg) && reg_data(reg).is_allocated();
}
@@ -908,6 +1237,50 @@ RegisterState* RegisterState::Clone() {
return zone_->New<RegisterState>(*this);
}
+class RegisterBitVector {
+ public:
+ RegisterBitVector() : bits_(0) {}
+
+ bool Contains(RegisterIndex reg, MachineRepresentation rep) const {
+ return bits_ & reg.ToBit(rep);
+ }
+
+ RegisterIndex GetFirstSet() const {
+ return RegisterIndex(base::bits::CountTrailingZeros(bits_));
+ }
+
+ RegisterIndex GetFirstCleared(int max_reg) const {
+ int reg_index = base::bits::CountTrailingZeros(~bits_);
+ if (reg_index < max_reg) {
+ return RegisterIndex(reg_index);
+ } else {
+ return RegisterIndex::Invalid();
+ }
+ }
+
+ void Add(RegisterIndex reg, MachineRepresentation rep) {
+ bits_ |= reg.ToBit(rep);
+ }
+
+ void Clear(RegisterIndex reg, MachineRepresentation rep) {
+ bits_ &= ~reg.ToBit(rep);
+ }
+
+ RegisterBitVector Union(const RegisterBitVector& other) {
+ return RegisterBitVector(bits_ | other.bits_);
+ }
+
+ void Reset() { bits_ = 0; }
+ bool IsEmpty() const { return bits_ == 0; }
+
+ private:
+ explicit RegisterBitVector(uintptr_t bits) : bits_(bits) {}
+
+ static_assert(RegisterConfiguration::kMaxRegisters <= sizeof(uintptr_t) * 8,
+ "Maximum registers must fit in uintptr_t bitmap");
+ uintptr_t bits_;
+};
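
The RegisterBitVector above replaces the raw uintptr_t bitmaps used further down; GetFirstSet and GetFirstCleared lean on count-trailing-zeros. A small standalone illustration of those two lookups, using a plain uint64_t and the GCC/Clang __builtin_ctzll as a stand-in for base::bits::CountTrailingZeros:

    #include <cassert>
    #include <cstdint>

    static int FirstSet(uint64_t bits) {
      assert(bits != 0);                         // ctz is undefined for 0
      return __builtin_ctzll(bits);              // index of the lowest set bit
    }

    static int FirstCleared(uint64_t bits, int max_reg) {
      if (~bits == 0) return -1;                 // every register taken
      int reg_index = __builtin_ctzll(~bits);    // index of the lowest clear bit
      return reg_index < max_reg ? reg_index : -1;  // -1 ~ RegisterIndex::Invalid()
    }

    int main() {
      uint64_t bits = 0b10110;                   // registers 1, 2 and 4 allocated
      assert(FirstSet(bits) == 1);
      assert(FirstCleared(bits, 16) == 0);
      bits |= 1;                                 // register 0 allocated too
      assert(FirstCleared(bits, 16) == 3);
      assert(FirstCleared(~0ull, 16) == -1);     // no free register at all
      return 0;
    }

Wrapping the bitmap in a class lets reg.ToBit(rep) account for FP aliasing in one place instead of at every call site, which is why the uintptr_t fields below become RegisterBitVector members.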
+
// A SinglePassRegisterAllocator is a fast register allocator that does a single
// pass through the instruction stream without performing any live-range
// analysis beforehand. It deals with a single RegisterKind, either general or
@@ -953,6 +1326,11 @@ class SinglePassRegisterAllocator final {
void EndBlock(const InstructionBlock* block);
void EndInstruction();
+ void UpdateForDeferredBlock(int instr_index);
+ void AllocateDeferredBlockSpillOutput(int instr_index,
+ RpoNumber deferred_block,
+ int virtual_register);
+
RegisterKind kind() const { return kind_; }
BitVector* assigned_registers() const { return assigned_registers_; }
@@ -985,6 +1363,12 @@ class SinglePassRegisterAllocator final {
// state into the current block.
void SpillRegisterAtMerge(RegisterState* reg_state, RegisterIndex reg);
+ // Introduce a gap move to move |virtual_register| from reg |from| to reg |to|
+ // on entry to a |successor| block.
+ void MoveRegisterOnMerge(RegisterIndex from, RegisterIndex to,
+ int virtual_register, RpoNumber successor,
+ RegisterState* succ_state);
+
// Update the virtual register data with the data in register_state()
void UpdateVirtualRegisterState();
@@ -1017,6 +1401,10 @@ class SinglePassRegisterAllocator final {
void SpillRegister(RegisterIndex reg);
void SpillRegisterForVirtualRegister(int virtual_register);
+ // Pre-emptively spill the register at the exit of deferred blocks such that
+ // uses of this register in non-deferred blocks don't need to be spilled.
+ void SpillRegisterForDeferred(RegisterIndex reg, int instr_index);
+
// Returns an AllocatedOperand corresponding to the use of |reg| for
// |virtual_register|.
AllocatedOperand AllocatedOperandForReg(RegisterIndex reg,
@@ -1031,13 +1419,15 @@ class SinglePassRegisterAllocator final {
// Helper functions to choose the best register for a given operand.
V8_INLINE RegisterIndex
- ChooseRegisterFor(VirtualRegisterData& virtual_register, UsePosition pos,
- bool must_use_register);
+ ChooseRegisterFor(VirtualRegisterData& virtual_register, int instr_index,
+ UsePosition pos, bool must_use_register);
V8_INLINE RegisterIndex ChooseRegisterFor(MachineRepresentation rep,
UsePosition pos,
bool must_use_register);
V8_INLINE RegisterIndex ChooseFreeRegister(MachineRepresentation rep,
UsePosition pos);
+ V8_INLINE RegisterIndex ChooseFreeRegister(
+ const RegisterBitVector& allocated_regs, MachineRepresentation rep);
V8_INLINE RegisterIndex ChooseRegisterToSpill(MachineRepresentation rep,
UsePosition pos);
@@ -1048,7 +1438,7 @@ class SinglePassRegisterAllocator final {
V8_INLINE void FreeRegister(RegisterIndex reg, int virtual_register);
V8_INLINE void MarkRegisterUse(RegisterIndex reg, MachineRepresentation rep,
UsePosition pos);
- V8_INLINE uintptr_t InUseBitmap(UsePosition pos);
+ V8_INLINE RegisterBitVector InUseBitmap(UsePosition pos);
V8_INLINE bool IsValidForRep(RegisterIndex reg, MachineRepresentation rep);
// Return the register allocated to |virtual_register|, if any.
@@ -1063,6 +1453,10 @@ class SinglePassRegisterAllocator final {
bool VirtualRegisterIsUnallocatedOrInReg(int virtual_register,
RegisterIndex reg);
+ // Returns a RegisterBitVector representing the allocated registers in
+ // reg_state.
+ RegisterBitVector GetAllocatedRegBitVector(RegisterState* reg_state);
+
// Check the consistency of reg->vreg and vreg->reg mappings if a debug build.
void CheckConsistency();
@@ -1101,11 +1495,9 @@ class SinglePassRegisterAllocator final {
MidTierRegisterAllocationData* data_;
- static_assert(RegisterConfiguration::kMaxRegisters <= sizeof(uintptr_t) * 8,
- "Maximum registers must fit in uintptr_t bitmap");
- uintptr_t in_use_at_instr_start_bits_;
- uintptr_t in_use_at_instr_end_bits_;
- uintptr_t allocated_registers_bits_;
+ RegisterBitVector in_use_at_instr_start_bits_;
+ RegisterBitVector in_use_at_instr_end_bits_;
+ RegisterBitVector allocated_registers_bits_;
// These fields are only used when kSimpleFPAliasing == false.
base::Optional<ZoneVector<RegisterIndex>> float32_reg_code_to_index_;
@@ -1129,9 +1521,9 @@ SinglePassRegisterAllocator::SinglePassRegisterAllocator(
assigned_registers_(data->code_zone()->New<BitVector>(
GetRegisterCount(data->config(), kind), data->code_zone())),
data_(data),
- in_use_at_instr_start_bits_(0),
- in_use_at_instr_end_bits_(0),
- allocated_registers_bits_(0) {
+ in_use_at_instr_start_bits_(),
+ in_use_at_instr_end_bits_(),
+ allocated_registers_bits_() {
for (int i = 0; i < num_allocatable_registers_; i++) {
int reg_code = index_to_reg_code_[i];
reg_code_to_index_[reg_code] = RegisterIndex(i);
@@ -1189,17 +1581,24 @@ RegisterIndex SinglePassRegisterAllocator::RegisterForVirtualRegister(
return virtual_register_to_reg_[virtual_register];
}
+void SinglePassRegisterAllocator::UpdateForDeferredBlock(int instr_index) {
+ if (!HasRegisterState()) return;
+ for (RegisterIndex reg : *register_state()) {
+ SpillRegisterForDeferred(reg, instr_index);
+ }
+}
+
void SinglePassRegisterAllocator::EndInstruction() {
- in_use_at_instr_end_bits_ = 0;
- in_use_at_instr_start_bits_ = 0;
+ in_use_at_instr_end_bits_.Reset();
+ in_use_at_instr_start_bits_.Reset();
}
void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) {
DCHECK(!HasRegisterState());
DCHECK_NULL(current_block_);
- DCHECK_EQ(in_use_at_instr_start_bits_, 0);
- DCHECK_EQ(in_use_at_instr_end_bits_, 0);
- DCHECK_EQ(allocated_registers_bits_, 0);
+ DCHECK(in_use_at_instr_start_bits_.IsEmpty());
+ DCHECK(in_use_at_instr_end_bits_.IsEmpty());
+ DCHECK(allocated_registers_bits_.IsEmpty());
// Update the current block we are processing.
current_block_ = block;
@@ -1216,8 +1615,8 @@ void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) {
}
void SinglePassRegisterAllocator::EndBlock(const InstructionBlock* block) {
- DCHECK_EQ(in_use_at_instr_start_bits_, 0);
- DCHECK_EQ(in_use_at_instr_end_bits_, 0);
+ DCHECK(in_use_at_instr_start_bits_.IsEmpty());
+ DCHECK(in_use_at_instr_end_bits_.IsEmpty());
// If we didn't allocate any registers of this kind, or we have reached the
// start, nothing to do here.
@@ -1236,9 +1635,8 @@ void SinglePassRegisterAllocator::EndBlock(const InstructionBlock* block) {
// Remove virtual register to register mappings and clear register state.
// We will update the register state when starting the next block.
- while (allocated_registers_bits_ != 0) {
- RegisterIndex reg(
- base::bits::CountTrailingZeros(allocated_registers_bits_));
+ while (!allocated_registers_bits_.IsEmpty()) {
+ RegisterIndex reg = allocated_registers_bits_.GetFirstSet();
FreeRegister(reg, VirtualRegisterForRegister(reg));
}
current_block_ = nullptr;
@@ -1275,19 +1673,51 @@ void SinglePassRegisterAllocator::MergeStateFrom(
UpdateVirtualRegisterState();
} else {
// Otherwise try to merge our state with the existing state.
- for (RegisterIndex reg : *register_state()) {
+ RegisterBitVector processed_regs;
+ RegisterBitVector succ_allocated_regs =
+ GetAllocatedRegBitVector(successor_registers);
+ for (RegisterIndex reg : *successor_registers) {
+ // If |reg| isn't allocated in successor registers, nothing to do.
+ if (!successor_registers->IsAllocated(reg)) continue;
+
+ int virtual_register =
+ successor_registers->VirtualRegisterForRegister(reg);
+ MachineRepresentation rep = RepresentationFor(virtual_register);
+
+ // If we have already processed |reg|, e.g., by adding a gap move to that
+ // register, then we can continue.
+ if (processed_regs.Contains(reg, rep)) continue;
+ processed_regs.Add(reg, rep);
+
if (register_state()->IsAllocated(reg)) {
if (successor_registers->Equals(reg, register_state())) {
// Both match, keep the merged register data.
register_state()->CommitAtMerge(reg);
} else {
- // TODO(rmcilroy) consider adding a gap move to shuffle register
- // into the same as the target. For now just spill.
- SpillRegisterAtMerge(successor_registers, reg);
+ // Try to find a new register for this successor register in the
+ // merge block, and add a gap move on entry of the successor block.
+ RegisterIndex new_reg =
+ RegisterForVirtualRegister(virtual_register);
+ if (!new_reg.is_valid()) {
+ new_reg = ChooseFreeRegister(
+ allocated_registers_bits_.Union(succ_allocated_regs), rep);
+ } else if (new_reg != reg) {
+ // Spill the |new_reg| in the successor block to be able to use it
+ // for this gap move. It would be spilled anyway since it contains
+ // a different virtual register than it does in the merge block.
+ SpillRegisterAtMerge(successor_registers, new_reg);
+ }
+
+ if (new_reg.is_valid()) {
+ MoveRegisterOnMerge(new_reg, reg, virtual_register, successor,
+ successor_registers);
+ processed_regs.Add(new_reg, rep);
+ } else {
+ SpillRegisterAtMerge(successor_registers, reg);
+ }
}
- } else if (successor_registers->IsAllocated(reg)) {
- int virtual_register =
- successor_registers->VirtualRegisterForRegister(reg);
+ } else {
+ DCHECK(successor_registers->IsAllocated(reg));
if (RegisterForVirtualRegister(virtual_register).is_valid()) {
// If we already hold the virtual register in a different register
// then spill this register in the successor block to avoid
@@ -1298,7 +1728,6 @@ void SinglePassRegisterAllocator::MergeStateFrom(
// Register is free in our current register state, so merge the
// successor block's register details into it.
register_state()->CopyFrom(reg, successor_registers);
- int virtual_register = VirtualRegisterForRegister(reg);
AssignRegister(reg, virtual_register, UsePosition::kNone);
}
}
@@ -1307,6 +1736,18 @@ void SinglePassRegisterAllocator::MergeStateFrom(
}
}
+RegisterBitVector SinglePassRegisterAllocator::GetAllocatedRegBitVector(
+ RegisterState* reg_state) {
+ RegisterBitVector allocated_regs;
+ for (RegisterIndex reg : *reg_state) {
+ if (reg_state->IsAllocated(reg)) {
+ int virtual_register = reg_state->VirtualRegisterForRegister(reg);
+ allocated_regs.Add(reg, RepresentationFor(virtual_register));
+ }
+ }
+ return allocated_regs;
+}
+
void SinglePassRegisterAllocator::SpillRegisterAtMerge(RegisterState* reg_state,
RegisterIndex reg) {
DCHECK_NE(reg_state, register_state());
@@ -1317,6 +1758,17 @@ void SinglePassRegisterAllocator::SpillRegisterAtMerge(RegisterState* reg_state,
}
}
+void SinglePassRegisterAllocator::MoveRegisterOnMerge(
+ RegisterIndex from, RegisterIndex to, int virtual_register,
+ RpoNumber successor, RegisterState* succ_state) {
+ int instr_index = data()->GetBlock(successor)->first_instruction_index();
+ MoveOperands* move =
+ data()->AddPendingOperandGapMove(instr_index, Instruction::START);
+ succ_state->Commit(to, AllocatedOperandForReg(to, virtual_register),
+ &move->destination(), data());
+ AllocatePendingUse(from, virtual_register, &move->source(), instr_index);
+}
+
void SinglePassRegisterAllocator::UpdateVirtualRegisterState() {
// Update to the new register state and update vreg_to_register map and
// resetting any shared registers that were spilled by another block.
@@ -1339,8 +1791,8 @@ void SinglePassRegisterAllocator::CheckConsistency() {
RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
if (reg.is_valid()) {
CHECK_EQ(virtual_register, VirtualRegisterForRegister(reg));
- CHECK(allocated_registers_bits_ &
- reg.ToBit(RepresentationFor(virtual_register)));
+ CHECK(allocated_registers_bits_.Contains(
+ reg, RepresentationFor(virtual_register)));
}
}
@@ -1348,8 +1800,8 @@ void SinglePassRegisterAllocator::CheckConsistency() {
int virtual_register = VirtualRegisterForRegister(reg);
if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
CHECK_EQ(reg, RegisterForVirtualRegister(virtual_register));
- CHECK(allocated_registers_bits_ &
- reg.ToBit(RepresentationFor(virtual_register)));
+ CHECK(allocated_registers_bits_.Contains(
+ reg, RepresentationFor(virtual_register)));
}
}
#endif
@@ -1422,8 +1874,8 @@ void SinglePassRegisterAllocator::AssignRegister(RegisterIndex reg,
UsePosition pos) {
MachineRepresentation rep = RepresentationFor(virtual_register);
assigned_registers()->Add(ToRegCode(reg, rep));
+ allocated_registers_bits_.Add(reg, rep);
MarkRegisterUse(reg, rep, pos);
- allocated_registers_bits_ |= reg.ToBit(rep);
if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
virtual_register_to_reg_[virtual_register] = reg;
}
@@ -1433,30 +1885,31 @@ void SinglePassRegisterAllocator::MarkRegisterUse(RegisterIndex reg,
MachineRepresentation rep,
UsePosition pos) {
if (pos == UsePosition::kStart || pos == UsePosition::kAll) {
- in_use_at_instr_start_bits_ |= reg.ToBit(rep);
+ in_use_at_instr_start_bits_.Add(reg, rep);
}
if (pos == UsePosition::kEnd || pos == UsePosition::kAll) {
- in_use_at_instr_end_bits_ |= reg.ToBit(rep);
+ in_use_at_instr_end_bits_.Add(reg, rep);
}
}
void SinglePassRegisterAllocator::FreeRegister(RegisterIndex reg,
int virtual_register) {
- allocated_registers_bits_ &= ~reg.ToBit(RepresentationFor(virtual_register));
+ allocated_registers_bits_.Clear(reg, RepresentationFor(virtual_register));
if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
virtual_register_to_reg_[virtual_register] = RegisterIndex::Invalid();
}
}
RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
- VirtualRegisterData& virtual_register, UsePosition pos,
+ VirtualRegisterData& virtual_register, int instr_index, UsePosition pos,
bool must_use_register) {
// If register is already allocated to the virtual register, use that.
RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg());
+
// If we don't need a register, only try to allocate one if the virtual
// register hasn't yet been spilled, to try to avoid spilling it.
- if (!reg.is_valid() &&
- (must_use_register || !virtual_register.HasSpillOperand())) {
+ if (!reg.is_valid() && (must_use_register ||
+ !virtual_register.IsSpilledAt(instr_index, data()))) {
reg = ChooseRegisterFor(RepresentationFor(virtual_register.vreg()), pos,
must_use_register);
}
@@ -1473,14 +1926,14 @@ RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
return reg;
}
-uintptr_t SinglePassRegisterAllocator::InUseBitmap(UsePosition pos) {
+RegisterBitVector SinglePassRegisterAllocator::InUseBitmap(UsePosition pos) {
switch (pos) {
case UsePosition::kStart:
return in_use_at_instr_start_bits_;
case UsePosition::kEnd:
return in_use_at_instr_end_bits_;
case UsePosition::kAll:
- return in_use_at_instr_start_bits_ | in_use_at_instr_end_bits_;
+ return in_use_at_instr_start_bits_.Union(in_use_at_instr_end_bits_);
case UsePosition::kNone:
UNREACHABLE();
}
@@ -1508,20 +1961,21 @@ RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
MachineRepresentation rep, UsePosition pos) {
// Take the first free, non-blocked register, if available.
// TODO(rmcilroy): Consider a better heuristic.
- uintptr_t allocated_or_in_use = InUseBitmap(pos) | allocated_registers_bits_;
+ RegisterBitVector allocated_or_in_use =
+ InUseBitmap(pos).Union(allocated_registers_bits_);
+ return ChooseFreeRegister(allocated_or_in_use, rep);
+}
+RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
+ const RegisterBitVector& allocated_regs, MachineRepresentation rep) {
RegisterIndex chosen_reg = RegisterIndex::Invalid();
if (kSimpleFPAliasing || kind() == RegisterKind::kGeneral) {
- int reg_index = base::bits::CountTrailingZeros(~allocated_or_in_use);
- if (reg_index < num_allocatable_registers()) {
- chosen_reg = RegisterIndex(reg_index);
- }
+ chosen_reg = allocated_regs.GetFirstCleared(num_allocatable_registers());
} else {
// If we don't have simple fp aliasing, we need to check each register
// individually to get one with the required representation.
for (RegisterIndex reg : *register_state()) {
- if (IsValidForRep(reg, rep) &&
- (allocated_or_in_use & reg.ToBit(rep)) == 0) {
+ if (IsValidForRep(reg, rep) && !allocated_regs.Contains(reg, rep)) {
chosen_reg = reg;
break;
}
@@ -1534,7 +1988,7 @@ RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
RegisterIndex SinglePassRegisterAllocator::ChooseRegisterToSpill(
MachineRepresentation rep, UsePosition pos) {
- uintptr_t in_use = InUseBitmap(pos);
+ RegisterBitVector in_use = InUseBitmap(pos);
// Choose a register that will need to be spilled. Preferentially choose:
// - A register with only pending uses, to avoid having to add a gap move for
@@ -1550,7 +2004,7 @@ RegisterIndex SinglePassRegisterAllocator::ChooseRegisterToSpill(
bool already_spilled = false;
for (RegisterIndex reg : *register_state()) {
// Skip if register is in use, or not valid for representation.
- if (!IsValidForRep(reg, rep) || (in_use & reg.ToBit(rep))) continue;
+ if (!IsValidForRep(reg, rep) || in_use.Contains(reg, rep)) continue;
VirtualRegisterData& vreg_data =
VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
@@ -1610,6 +2064,45 @@ void SinglePassRegisterAllocator::SpillRegisterForVirtualRegister(
}
}
+void SinglePassRegisterAllocator::SpillRegisterForDeferred(RegisterIndex reg,
+ int instr_index) {
+ // Commit the output operation, mark the register use in this
+ // instruction, then mark the register as free going forward.
+ if (register_state()->IsAllocated(reg) && register_state()->IsShared(reg)) {
+ int virtual_register = VirtualRegisterForRegister(reg);
+ AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register);
+ register_state()->SpillForDeferred(reg, allocated, instr_index, data());
+ FreeRegister(reg, virtual_register);
+ }
+ CheckConsistency();
+}
+
+void SinglePassRegisterAllocator::AllocateDeferredBlockSpillOutput(
+ int instr_index, RpoNumber deferred_block, int virtual_register) {
+ DCHECK(data()->GetBlock(deferred_block)->IsDeferred());
+ VirtualRegisterData& vreg_data =
+ data()->VirtualRegisterDataFor(virtual_register);
+ if (!vreg_data.NeedsSpillAtOutput() &&
+ !DefinedAfter(virtual_register, instr_index, UsePosition::kEnd)) {
+ // If a register has been assigned to the virtual register, and the virtual
+ // register still doesn't need to be spilled at its output, add a pending
+ // move to output the virtual register to its spill slot on entry to the
+ // deferred block (to avoid spilling in non-deferred code).
+ // TODO(rmcilroy): Consider assigning a register even if the virtual
+ // register isn't yet assigned - currently doing this regresses performance.
+ RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
+ if (reg.is_valid()) {
+ int deferred_block_start =
+ data()->GetBlock(deferred_block)->first_instruction_index();
+ register_state()->MoveToSpillSlotOnDeferred(reg, virtual_register,
+ deferred_block_start, data());
+ return;
+ } else {
+ vreg_data.MarkAsNeedsSpillAtOutput();
+ }
+ }
+}
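
The branch structure in AllocateDeferredBlockSpillOutput boils down to a three-way decision. A hedged restatement with plain bools standing in for the allocator queries (the helper below is illustrative only, not part of the patch):

    #include <cassert>

    enum class DeferredSpillAction {
      kNone,                    // already spilled at output, or defined later
      kMoveToSpillSlotOnEntry,  // register known: pending gap move at region entry
      kSpillAtOutput            // no register yet: fall back to spilling the output
    };

    DeferredSpillAction ChooseDeferredSpillAction(bool needs_spill_at_output,
                                                  bool defined_after_instr,
                                                  bool has_register) {
      if (needs_spill_at_output || defined_after_instr) {
        return DeferredSpillAction::kNone;
      }
      return has_register ? DeferredSpillAction::kMoveToSpillSlotOnEntry
                          : DeferredSpillAction::kSpillAtOutput;
    }

    int main() {
      assert(ChooseDeferredSpillAction(false, false, true) ==
             DeferredSpillAction::kMoveToSpillSlotOnEntry);
      assert(ChooseDeferredSpillAction(false, false, false) ==
             DeferredSpillAction::kSpillAtOutput);
      assert(ChooseDeferredSpillAction(true, false, false) ==
             DeferredSpillAction::kNone);
      return 0;
    }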
+
AllocatedOperand SinglePassRegisterAllocator::AllocatedOperandForReg(
RegisterIndex reg, int virtual_register) {
MachineRepresentation rep = RepresentationFor(virtual_register);
@@ -1709,7 +2202,8 @@ void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
bool must_use_register = operand->HasRegisterPolicy() ||
(vreg_data.is_constant() &&
!operand->HasRegisterOrSlotOrConstantPolicy());
- RegisterIndex reg = ChooseRegisterFor(vreg_data, pos, must_use_register);
+ RegisterIndex reg =
+ ChooseRegisterFor(vreg_data, instr_index, pos, must_use_register);
if (reg.is_valid()) {
if (must_use_register) {
@@ -1731,7 +2225,8 @@ void SinglePassRegisterAllocator::AllocateGapMoveInput(
// Gap move inputs should be unconstrained.
DCHECK(operand->HasRegisterOrSlotPolicy());
- RegisterIndex reg = ChooseRegisterFor(vreg_data, UsePosition::kStart, false);
+ RegisterIndex reg =
+ ChooseRegisterFor(vreg_data, instr_index, UsePosition::kStart, false);
if (reg.is_valid()) {
AllocatePendingUse(reg, virtual_register, operand, instr_index);
} else {
@@ -1769,7 +2264,8 @@ RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
reg = FromRegCode(operand->fixed_register_index(),
RepresentationFor(virtual_register));
} else {
- reg = ChooseRegisterFor(vreg_data, pos, operand->HasRegisterPolicy());
+ reg = ChooseRegisterFor(vreg_data, instr_index, pos,
+ operand->HasRegisterPolicy());
}
// TODO(rmcilroy): support secondary storage.
@@ -1797,6 +2293,8 @@ RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
vreg_data.EmitGapMoveFromOutputToSpillSlot(
*AllocatedOperand::cast(operand), current_block(), instr_index,
data());
+ } else if (vreg_data.NeedsSpillAtDeferredBlocks()) {
+ vreg_data.EmitDeferredSpillOutputs(data());
}
}
@@ -1965,8 +2463,12 @@ void SinglePassRegisterAllocator::AllocatePhi(int virtual_register,
SpillRegisterForVirtualRegister(virtual_register);
} else {
RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
- DCHECK(reg.is_valid());
- register_state()->UseForPhiGapMove(reg);
+ if (reg.is_valid()) {
+ // If the register is valid, assign it as a phi gap move to be processed
+ // at the successor blocks. If no register or spill slot was used then
+ // the virtual register was never used.
+ register_state()->UseForPhiGapMove(reg);
+ }
}
}
@@ -1985,6 +2487,8 @@ class MidTierOutputProcessor final {
void DefineOutputs(const InstructionBlock* block);
private:
+ void PopulateDeferredBlockRegion(RpoNumber initial_block);
+
VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
return data()->VirtualRegisterDataFor(virtual_register);
}
@@ -1992,16 +2496,71 @@ class MidTierOutputProcessor final {
return data()->RepresentationFor(virtual_register);
}
+ bool IsDeferredBlockBoundary(const ZoneVector<RpoNumber>& blocks) {
+ return blocks.size() == 1 && !data()->GetBlock(blocks[0])->IsDeferred();
+ }
+
MidTierRegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data()->code(); }
- Zone* allocation_zone() const { return data()->allocation_zone(); }
+ Zone* zone() const { return data()->allocation_zone(); }
MidTierRegisterAllocationData* const data_;
+ ZoneQueue<RpoNumber> deferred_blocks_worklist_;
+ ZoneSet<RpoNumber> deferred_blocks_processed_;
};
MidTierOutputProcessor::MidTierOutputProcessor(
MidTierRegisterAllocationData* data)
- : data_(data) {}
+ : data_(data),
+ deferred_blocks_worklist_(data->allocation_zone()),
+ deferred_blocks_processed_(data->allocation_zone()) {}
+
+void MidTierOutputProcessor::PopulateDeferredBlockRegion(
+ RpoNumber initial_block) {
+ DeferredBlocksRegion* deferred_blocks_region =
+ zone()->New<DeferredBlocksRegion>(zone(),
+ code()->InstructionBlockCount());
+ DCHECK(deferred_blocks_worklist_.empty());
+ deferred_blocks_worklist_.push(initial_block);
+ deferred_blocks_processed_.insert(initial_block);
+ while (!deferred_blocks_worklist_.empty()) {
+ RpoNumber current = deferred_blocks_worklist_.front();
+ deferred_blocks_worklist_.pop();
+ deferred_blocks_region->AddBlock(current, data());
+
+ const InstructionBlock* curr_block = data()->GetBlock(current);
+ // Check for whether the predecessor blocks are still deferred.
+ if (IsDeferredBlockBoundary(curr_block->predecessors())) {
+ // If not, mark the predecessor as having a deferred successor.
+ data()
+ ->block_state(curr_block->predecessors()[0])
+ .MarkAsDeferredBlockBoundary();
+ } else {
+ // Otherwise process predecessors.
+ for (RpoNumber pred : curr_block->predecessors()) {
+ if (deferred_blocks_processed_.count(pred) == 0) {
+ deferred_blocks_worklist_.push(pred);
+ deferred_blocks_processed_.insert(pred);
+ }
+ }
+ }
+
+ // Check for whether the successor blocks are still deferred.
+ // Process any unprocessed successors if we aren't at a boundary.
+ if (IsDeferredBlockBoundary(curr_block->successors())) {
+ // If not, mark the current block as a deferred block boundary.
+ data()->block_state(current).MarkAsDeferredBlockBoundary();
+ } else {
+ // Otherwise process successors.
+ for (RpoNumber succ : curr_block->successors()) {
+ if (deferred_blocks_processed_.count(succ) == 0) {
+ deferred_blocks_worklist_.push(succ);
+ deferred_blocks_processed_.insert(succ);
+ }
+ }
+ }
+ }
+}
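PopulateDeferredBlockRegion above is a worklist flood fill: starting from one deferred block it follows predecessor and successor edges while they stay inside deferred code, adds every visited block to the shared region, and marks the blocks on the boundary. A stripped-down sketch of the same traversal pattern follows; the boundary marking and the single-edge checks are simplified to skipping non-deferred neighbours, and all names (Block, is_deferred, neighbors) are illustrative rather than V8 API.

#include <queue>
#include <set>
#include <vector>

struct Block {
  bool is_deferred;
  std::vector<int> neighbors;  // predecessors and successors combined
};

// Collects all deferred blocks reachable from |start| without leaving the
// deferred region; non-deferred neighbours act as region boundaries.
std::set<int> CollectDeferredRegion(const std::vector<Block>& blocks,
                                    int start) {
  std::set<int> region;
  std::queue<int> worklist;
  worklist.push(start);
  region.insert(start);
  while (!worklist.empty()) {
    int current = worklist.front();
    worklist.pop();
    for (int n : blocks[current].neighbors) {
      // Stop at the boundary: non-deferred blocks are not part of the region.
      if (!blocks[n].is_deferred) continue;
      if (region.insert(n).second) worklist.push(n);
    }
  }
  return region;
}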
void MidTierOutputProcessor::InitializeBlockState(
const InstructionBlock* block) {
@@ -2013,8 +2572,13 @@ void MidTierOutputProcessor::InitializeBlockState(
}
}
- // Mark this block as dominating itself.
BlockState& block_state = data()->block_state(block->rpo_number());
+
+ if (block->IsDeferred() && !block_state.deferred_blocks_region()) {
+ PopulateDeferredBlockRegion(block->rpo_number());
+ }
+
+ // Mark this block as dominating itself.
block_state.dominated_blocks()->Add(block->rpo_number().ToInt());
if (block->dominator().IsValid()) {
@@ -2030,6 +2594,8 @@ void MidTierOutputProcessor::InitializeBlockState(
void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
int block_start = block->first_instruction_index();
+ bool is_deferred = block->IsDeferred();
+
for (int index = block->last_instruction_index(); index >= block_start;
index--) {
Instruction* instr = code()->InstructionAt(index);
@@ -2042,25 +2608,30 @@ void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
ConstantOperand* constant_operand = ConstantOperand::cast(output);
int virtual_register = constant_operand->virtual_register();
VirtualRegisterDataFor(virtual_register)
- .DefineAsConstantOperand(constant_operand, index);
+ .DefineAsConstantOperand(constant_operand, index, is_deferred);
} else {
DCHECK(output->IsUnallocated());
UnallocatedOperand* unallocated_operand =
UnallocatedOperand::cast(output);
int virtual_register = unallocated_operand->virtual_register();
+ bool is_exceptional_call_output =
+ instr->IsCallWithDescriptorFlags() &&
+ instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler);
if (unallocated_operand->HasFixedSlotPolicy()) {
// If output has a fixed slot policy, allocate its spill operand now
// so that the register allocator can use this knowledge.
MachineRepresentation rep = RepresentationFor(virtual_register);
- AllocatedOperand* fixed_spill_operand = AllocatedOperand::New(
- allocation_zone(), AllocatedOperand::STACK_SLOT, rep,
- unallocated_operand->fixed_slot_index());
+ AllocatedOperand* fixed_spill_operand =
+ AllocatedOperand::New(zone(), AllocatedOperand::STACK_SLOT, rep,
+ unallocated_operand->fixed_slot_index());
VirtualRegisterDataFor(virtual_register)
.DefineAsFixedSpillOperand(fixed_spill_operand, virtual_register,
- index);
+ index, is_deferred,
+ is_exceptional_call_output);
} else {
VirtualRegisterDataFor(virtual_register)
- .DefineAsUnallocatedOperand(virtual_register, index);
+ .DefineAsUnallocatedOperand(virtual_register, index, is_deferred,
+ is_exceptional_call_output);
}
}
}
@@ -2076,7 +2647,8 @@ void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
for (PhiInstruction* phi : block->phis()) {
int virtual_register = phi->virtual_register();
VirtualRegisterDataFor(virtual_register)
- .DefineAsPhi(virtual_register, block->first_instruction_index());
+ .DefineAsPhi(virtual_register, block->first_instruction_index(),
+ is_deferred);
}
}
@@ -2095,6 +2667,8 @@ void DefineOutputs(MidTierRegisterAllocationData* data) {
class MidTierRegisterAllocator final {
public:
explicit MidTierRegisterAllocator(MidTierRegisterAllocationData* data);
+ MidTierRegisterAllocator(const MidTierRegisterAllocator&) = delete;
+ MidTierRegisterAllocator& operator=(const MidTierRegisterAllocator&) = delete;
void AllocateRegisters(const InstructionBlock* block);
void UpdateSpillRangesForLoops();
@@ -2130,8 +2704,6 @@ class MidTierRegisterAllocator final {
MidTierRegisterAllocationData* const data_;
SinglePassRegisterAllocator general_reg_allocator_;
SinglePassRegisterAllocator double_reg_allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(MidTierRegisterAllocator);
};
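The DISALLOW_COPY_AND_ASSIGN removals here and in the classes below all follow the same C++11 idiom: the deleted special members are spelled out directly in the class instead of hidden behind a macro. A minimal illustration with a hypothetical class name:

class NonCopyable {
 public:
  NonCopyable() = default;
  NonCopyable(const NonCopyable&) = delete;
  NonCopyable& operator=(const NonCopyable&) = delete;
};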
MidTierRegisterAllocator::MidTierRegisterAllocator(
@@ -2142,9 +2714,31 @@ MidTierRegisterAllocator::MidTierRegisterAllocator(
void MidTierRegisterAllocator::AllocateRegisters(
const InstructionBlock* block) {
+ RpoNumber block_rpo = block->rpo_number();
+ bool is_deferred_block_boundary =
+ data()->block_state(block_rpo).is_deferred_block_boundary();
+
general_reg_allocator().StartBlock(block);
double_reg_allocator().StartBlock(block);
+  // If the block is not deferred but has deferred successors, then try to
+  // output spill slots for virtual registers that are only spilled in the
+  // deferred blocks, at the start of those deferred blocks. This avoids
+  // spilling them at their output in non-deferred blocks.
+ if (is_deferred_block_boundary && !block->IsDeferred()) {
+ for (RpoNumber successor : block->successors()) {
+ if (!data()->GetBlock(successor)->IsDeferred()) continue;
+ DCHECK_GT(successor, block_rpo);
+ for (int virtual_register :
+ *data()->block_state(successor).deferred_blocks_region()) {
+ USE(virtual_register);
+ AllocatorFor(RepresentationFor(virtual_register))
+ .AllocateDeferredBlockSpillOutput(block->last_instruction_index(),
+ successor, virtual_register);
+ }
+ }
+ }
+
// Allocate registers for instructions in reverse, from the end of the block
// to the start.
int block_start = block->first_instruction_index();
@@ -2215,6 +2809,13 @@ void MidTierRegisterAllocator::AllocateRegisters(
// phi gap move operations that are needed to resolve phis in our successor.
if (instr_index == block->last_instruction_index()) {
AllocatePhiGapMoves(block);
+
+    // If this block is deferred but its successor isn't, update the state to
+    // limit spills to the deferred blocks where possible.
+ if (is_deferred_block_boundary && block->IsDeferred()) {
+ general_reg_allocator().UpdateForDeferredBlock(instr_index);
+ double_reg_allocator().UpdateForDeferredBlock(instr_index);
+ }
}
// Allocate any unallocated gap move inputs.
@@ -2385,6 +2986,9 @@ void AllocateRegisters(MidTierRegisterAllocationData* data) {
class MidTierSpillSlotAllocator final {
public:
explicit MidTierSpillSlotAllocator(MidTierRegisterAllocationData* data);
+ MidTierSpillSlotAllocator(const MidTierSpillSlotAllocator&) = delete;
+ MidTierSpillSlotAllocator& operator=(const MidTierSpillSlotAllocator&) =
+ delete;
void Allocate(VirtualRegisterData* virtual_register);
@@ -2407,14 +3011,14 @@ class MidTierSpillSlotAllocator final {
ZonePriorityQueue<SpillSlot*, OrderByLastUse> allocated_slots_;
ZoneLinkedList<SpillSlot*> free_slots_;
int position_;
-
- DISALLOW_COPY_AND_ASSIGN(MidTierSpillSlotAllocator);
};
class MidTierSpillSlotAllocator::SpillSlot : public ZoneObject {
public:
SpillSlot(int stack_slot, int byte_width)
: stack_slot_(stack_slot), byte_width_(byte_width), range_() {}
+ SpillSlot(const SpillSlot&) = delete;
+ SpillSlot& operator=(const SpillSlot&) = delete;
void AddRange(const Range& range) { range_.AddRange(range); }
@@ -2429,8 +3033,6 @@ class MidTierSpillSlotAllocator::SpillSlot : public ZoneObject {
int stack_slot_;
int byte_width_;
Range range_;
-
- DISALLOW_COPY_AND_ASSIGN(SpillSlot);
};
bool MidTierSpillSlotAllocator::OrderByLastUse::operator()(
@@ -2525,6 +3127,9 @@ void AllocateSpillSlots(MidTierRegisterAllocationData* data) {
class MidTierReferenceMapPopulator final {
public:
explicit MidTierReferenceMapPopulator(MidTierRegisterAllocationData* data);
+ MidTierReferenceMapPopulator(const MidTierReferenceMapPopulator&) = delete;
+ MidTierReferenceMapPopulator& operator=(const MidTierReferenceMapPopulator&) =
+ delete;
void RecordReferences(const VirtualRegisterData& virtual_register);
@@ -2533,8 +3138,6 @@ class MidTierReferenceMapPopulator final {
InstructionSequence* code() const { return data()->code(); }
MidTierRegisterAllocationData* const data_;
-
- DISALLOW_COPY_AND_ASSIGN(MidTierReferenceMapPopulator);
};
MidTierReferenceMapPopulator::MidTierReferenceMapPopulator(
diff --git a/deps/v8/src/compiler/backend/mid-tier-register-allocator.h b/deps/v8/src/compiler/backend/mid-tier-register-allocator.h
index 6d8006badf..2440115095 100644
--- a/deps/v8/src/compiler/backend/mid-tier-register-allocator.h
+++ b/deps/v8/src/compiler/backend/mid-tier-register-allocator.h
@@ -34,6 +34,9 @@ class MidTierRegisterAllocationData final : public RegisterAllocationData {
InstructionSequence* code,
TickCounter* tick_counter,
const char* debug_name = nullptr);
+ MidTierRegisterAllocationData(const MidTierRegisterAllocationData&) = delete;
+ MidTierRegisterAllocationData& operator=(
+ const MidTierRegisterAllocationData&) = delete;
static MidTierRegisterAllocationData* cast(RegisterAllocationData* data) {
DCHECK_EQ(data->type(), Type::kMidTier);
@@ -57,8 +60,8 @@ class MidTierRegisterAllocationData final : public RegisterAllocationData {
const InstructionBlock* GetBlock(int instr_index);
// Returns a bitvector representing all the blocks that are dominated by the
- // output of the instruction at |instr_index|.
- const BitVector* GetBlocksDominatedBy(int instr_index);
+ // output of the instruction in |block|.
+ const BitVector* GetBlocksDominatedBy(const InstructionBlock* block);
// List of all instruction indices that require a reference map.
ZoneVector<int>& reference_map_instructions() {
@@ -97,8 +100,6 @@ class MidTierRegisterAllocationData final : public RegisterAllocationData {
BitVector spilled_virtual_registers_;
TickCounter* const tick_counter_;
-
- DISALLOW_COPY_AND_ASSIGN(MidTierRegisterAllocationData);
};
// Phase 1: Process instruction outputs to determine how each virtual register
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 5457883fee..c8265d73ae 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -657,7 +657,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Call(reg, reg, Code::kHeaderSize - kHeapObjectTag);
}
@@ -697,7 +697,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Addu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
__ Jump(reg);
@@ -722,7 +722,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Jump(reg);
frame_access_state()->ClearSPDelta();
@@ -868,8 +868,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
+ __ Branch(exit->label());
break;
}
case kArchRet:
@@ -1729,25 +1728,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
break;
}
- case kMipsS8x16LoadSplat: {
+ case kMipsS128Load8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ lb(kScratchReg, i.MemoryOperand());
__ fill_b(i.OutputSimd128Register(), kScratchReg);
break;
}
- case kMipsS16x8LoadSplat: {
+ case kMipsS128Load16Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ lh(kScratchReg, i.MemoryOperand());
__ fill_h(i.OutputSimd128Register(), kScratchReg);
break;
}
- case kMipsS32x4LoadSplat: {
+ case kMipsS128Load32Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ Lw(kScratchReg, i.MemoryOperand());
__ fill_w(i.OutputSimd128Register(), kScratchReg);
break;
}
- case kMipsS64x2LoadSplat: {
+ case kMipsS128Load64Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
MemOperand memLow = i.MemoryOperand();
@@ -1759,7 +1758,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_w(dst, kSimd128ScratchReg, dst);
break;
}
- case kMipsI16x8Load8x8S: {
+ case kMipsS128Load8x8S: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
MemOperand memLow = i.MemoryOperand();
@@ -1773,7 +1772,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_b(dst, kSimd128ScratchReg, dst);
break;
}
- case kMipsI16x8Load8x8U: {
+ case kMipsS128Load8x8U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
MemOperand memLow = i.MemoryOperand();
@@ -1786,7 +1785,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_b(dst, kSimd128RegZero, dst);
break;
}
- case kMipsI32x4Load16x4S: {
+ case kMipsS128Load16x4S: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
MemOperand memLow = i.MemoryOperand();
@@ -1800,7 +1799,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_h(dst, kSimd128ScratchReg, dst);
break;
}
- case kMipsI32x4Load16x4U: {
+ case kMipsS128Load16x4U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
MemOperand memLow = i.MemoryOperand();
@@ -1813,7 +1812,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_h(dst, kSimd128RegZero, dst);
break;
}
- case kMipsI64x2Load32x2S: {
+ case kMipsS128Load32x2S: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
MemOperand memLow = i.MemoryOperand();
@@ -1827,7 +1826,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_w(dst, kSimd128ScratchReg, dst);
break;
}
- case kMipsI64x2Load32x2U: {
+ case kMipsS128Load32x2U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
MemOperand memLow = i.MemoryOperand();
@@ -2585,6 +2584,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ copy_u_b(dst, scratch0, 0);
break;
}
+ case kMipsI32x4DotI16x8S: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ dotp_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
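The new kMipsI32x4DotI16x8S case maps the Wasm i32x4.dot_i16x8_s operation onto the MSA dotp_s_w instruction: each 32-bit output lane is the sum of two adjacent signed 16x16-bit products. A scalar reference sketch of that semantics, assuming the usual two's-complement wrap for the one sum that exceeds int32 range; this is an illustrative helper, not V8 code.

#include <cstdint>

void I32x4DotI16x8S(const int16_t a[8], const int16_t b[8], int32_t out[4]) {
  for (int i = 0; i < 4; ++i) {
    // Widen the products to 64 bits so the addition itself cannot overflow.
    int64_t sum = int64_t{a[2 * i]} * b[2 * i] +
                  int64_t{a[2 * i + 1]} * b[2 * i + 1];
    // The only sum outside int32 range is 2 * (-32768 * -32768) == 2^31,
    // which wraps to INT32_MIN, matching the hardware behaviour.
    out[i] = static_cast<int32_t>(static_cast<uint32_t>(sum));
  }
}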
case kMipsI16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2643,7 +2648,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMipsI16x8AddSaturateS: {
+ case kMipsI16x8AddSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2655,7 +2660,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMipsI16x8SubSaturateS: {
+ case kMipsI16x8SubSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2704,13 +2709,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kMipsI16x8AddSaturateU: {
+ case kMipsI16x8AddSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMipsI16x8SubSaturateU: {
+ case kMipsI16x8SubSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2821,7 +2826,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMipsI8x16AddSaturateS: {
+ case kMipsI8x16AddSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2833,7 +2838,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMipsI8x16SubSaturateS: {
+ case kMipsI8x16SubSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2888,13 +2893,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt3(1));
break;
}
- case kMipsI8x16AddSaturateU: {
+ case kMipsI8x16AddSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMipsI8x16SubSaturateU: {
+ case kMipsI8x16SubSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -4026,9 +4031,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
@@ -4048,41 +4052,81 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
MipsOperandConverter g(this, nullptr);
+ const int parameter_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
+
+  // {additional_pop_count} is only greater than zero if {parameter_count == 0}.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_count != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (__ emit_debug_code()) {
+ __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
+ g.ToRegister(additional_pop_count),
+ Operand(static_cast<int64_t>(0)));
+ }
+ }
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // Functions with JS linkage have at least one parameter (the receiver).
+ // If {parameter_count} == 0, it means it is a builtin with
+ // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
+ // itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_count != 0;
+#else
+ const bool drop_jsargs = false;
+#endif
+
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now unless they have a variable
// number of stack slot pops.
- if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
if (return_label_.is_bound()) {
__ Branch(&return_label_);
return;
} else {
__ bind(&return_label_);
- AssembleDeconstructFrame();
}
- } else {
- AssembleDeconstructFrame();
}
+ if (drop_jsargs) {
+ // Get the actual argument count
+ __ Lw(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
}
- if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
- pop_count += g.ToConstant(pop).ToInt32();
+
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ __ Addu(t0, t0, Operand(1)); // Also pop the receiver.
+ if (parameter_count > 1) {
+ __ li(kScratchReg, parameter_count);
+ __ slt(kScratchReg2, t0, kScratchReg);
+ __ movn(t0, kScratchReg, kScratchReg2);
+ }
+ __ sll(t0, t0, kSystemPointerSizeLog2);
+ __ Addu(sp, sp, t0);
+ } else if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ __ Drop(parameter_count + additional_count);
} else {
- Register pop_reg = g.ToRegister(pop);
+ Register pop_reg = g.ToRegister(additional_pop_count);
+ __ Drop(parameter_count);
__ sll(pop_reg, pop_reg, kSystemPointerSizeLog2);
- __ Addu(sp, sp, Operand(pop_reg));
- }
- if (pop_count != 0) {
- __ DropAndRet(pop_count);
- } else {
- __ Ret();
+ __ Addu(sp, sp, pop_reg);
}
+ __ Ret();
}
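In the drop_jsargs path above, the number of slots popped on return is max(argc + 1, parameter_count) words, where argc is read from the frame's StandardFrameConstants::kArgCOffset slot and the +1 accounts for the receiver; the slt/movn pair implements the max. A small scalar sketch of that arithmetic, with illustrative names:

#include <algorithm>
#include <cstddef>

size_t ReturnStackAdjustment(int argc, int parameter_count,
                             size_t pointer_size) {
  // Pop the receiver in addition to the actual arguments, but never pop
  // fewer slots than the formal parameter count declared for the function.
  int slots = std::max(argc + 1, parameter_count);
  return static_cast<size_t>(slots) * pointer_size;
}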
void CodeGenerator::FinishCode() {}
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index 46ce3d359a..47d439af58 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -217,6 +217,7 @@ namespace compiler {
V(MipsI32x4GeU) \
V(MipsI32x4Abs) \
V(MipsI32x4BitMask) \
+ V(MipsI32x4DotI16x8S) \
V(MipsI16x8Splat) \
V(MipsI16x8ExtractLaneU) \
V(MipsI16x8ExtractLaneS) \
@@ -226,10 +227,10 @@ namespace compiler {
V(MipsI16x8ShrS) \
V(MipsI16x8ShrU) \
V(MipsI16x8Add) \
- V(MipsI16x8AddSaturateS) \
+ V(MipsI16x8AddSatS) \
V(MipsI16x8AddHoriz) \
V(MipsI16x8Sub) \
- V(MipsI16x8SubSaturateS) \
+ V(MipsI16x8SubSatS) \
V(MipsI16x8Mul) \
V(MipsI16x8MaxS) \
V(MipsI16x8MinS) \
@@ -237,8 +238,8 @@ namespace compiler {
V(MipsI16x8Ne) \
V(MipsI16x8GtS) \
V(MipsI16x8GeS) \
- V(MipsI16x8AddSaturateU) \
- V(MipsI16x8SubSaturateU) \
+ V(MipsI16x8AddSatU) \
+ V(MipsI16x8SubSatU) \
V(MipsI16x8MaxU) \
V(MipsI16x8MinU) \
V(MipsI16x8GtU) \
@@ -254,9 +255,9 @@ namespace compiler {
V(MipsI8x16Shl) \
V(MipsI8x16ShrS) \
V(MipsI8x16Add) \
- V(MipsI8x16AddSaturateS) \
+ V(MipsI8x16AddSatS) \
V(MipsI8x16Sub) \
- V(MipsI8x16SubSaturateS) \
+ V(MipsI8x16SubSatS) \
V(MipsI8x16Mul) \
V(MipsI8x16MaxS) \
V(MipsI8x16MinS) \
@@ -265,8 +266,8 @@ namespace compiler {
V(MipsI8x16GtS) \
V(MipsI8x16GeS) \
V(MipsI8x16ShrU) \
- V(MipsI8x16AddSaturateU) \
- V(MipsI8x16SubSaturateU) \
+ V(MipsI8x16AddSatU) \
+ V(MipsI8x16SubSatU) \
V(MipsI8x16MaxU) \
V(MipsI8x16MinU) \
V(MipsI8x16GtU) \
@@ -313,16 +314,16 @@ namespace compiler {
V(MipsS8x8Reverse) \
V(MipsS8x4Reverse) \
V(MipsS8x2Reverse) \
- V(MipsS8x16LoadSplat) \
- V(MipsS16x8LoadSplat) \
- V(MipsS32x4LoadSplat) \
- V(MipsS64x2LoadSplat) \
- V(MipsI16x8Load8x8S) \
- V(MipsI16x8Load8x8U) \
- V(MipsI32x4Load16x4S) \
- V(MipsI32x4Load16x4U) \
- V(MipsI64x2Load32x2S) \
- V(MipsI64x2Load32x2U) \
+ V(MipsS128Load8Splat) \
+ V(MipsS128Load16Splat) \
+ V(MipsS128Load32Splat) \
+ V(MipsS128Load64Splat) \
+ V(MipsS128Load8x8S) \
+ V(MipsS128Load8x8U) \
+ V(MipsS128Load16x4S) \
+ V(MipsS128Load16x4U) \
+ V(MipsS128Load32x2S) \
+ V(MipsS128Load32x2U) \
V(MipsMsaLd) \
V(MipsMsaSt) \
V(MipsI32x4SConvertI16x8Low) \
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 64e78b8122..bf28eec443 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -118,8 +118,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsFloorWS:
case kMipsI16x8Add:
case kMipsI16x8AddHoriz:
- case kMipsI16x8AddSaturateS:
- case kMipsI16x8AddSaturateU:
+ case kMipsI16x8AddSatS:
+ case kMipsI16x8AddSatU:
case kMipsI16x8Eq:
case kMipsI16x8ExtractLaneU:
case kMipsI16x8ExtractLaneS:
@@ -144,8 +144,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI16x8ShrU:
case kMipsI16x8Splat:
case kMipsI16x8Sub:
- case kMipsI16x8SubSaturateS:
- case kMipsI16x8SubSaturateU:
+ case kMipsI16x8SubSatS:
+ case kMipsI16x8SubSatU:
case kMipsI16x8UConvertI32x4:
case kMipsI16x8UConvertI8x16High:
case kMipsI16x8UConvertI8x16Low:
@@ -180,9 +180,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI32x4UConvertI16x8Low:
case kMipsI32x4Abs:
case kMipsI32x4BitMask:
+ case kMipsI32x4DotI16x8S:
case kMipsI8x16Add:
- case kMipsI8x16AddSaturateS:
- case kMipsI8x16AddSaturateU:
+ case kMipsI8x16AddSatS:
+ case kMipsI8x16AddSatU:
case kMipsI8x16Eq:
case kMipsI8x16ExtractLaneU:
case kMipsI8x16ExtractLaneS:
@@ -205,8 +206,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI8x16ShrU:
case kMipsI8x16Splat:
case kMipsI8x16Sub:
- case kMipsI8x16SubSaturateS:
- case kMipsI8x16SubSaturateU:
+ case kMipsI8x16SubSatS:
+ case kMipsI8x16SubSatU:
case kMipsI8x16UConvertI16x8:
case kMipsI8x16Abs:
case kMipsI8x16BitMask:
@@ -315,16 +316,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsUlhu:
case kMipsUlw:
case kMipsUlwc1:
- case kMipsS8x16LoadSplat:
- case kMipsS16x8LoadSplat:
- case kMipsS32x4LoadSplat:
- case kMipsS64x2LoadSplat:
- case kMipsI16x8Load8x8S:
- case kMipsI16x8Load8x8U:
- case kMipsI32x4Load16x4S:
- case kMipsI32x4Load16x4U:
- case kMipsI64x2Load32x2S:
- case kMipsI64x2Load32x2U:
+ case kMipsS128Load8Splat:
+ case kMipsS128Load16Splat:
+ case kMipsS128Load32Splat:
+ case kMipsS128Load64Splat:
+ case kMipsS128Load8x8S:
+ case kMipsS128Load8x8U:
+ case kMipsS128Load16x4S:
+ case kMipsS128Load16x4U:
+ case kMipsS128Load32x2S:
+ case kMipsS128Load32x2U:
case kMipsWord32AtomicPairLoad:
return kIsLoadOperation;
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index b552b0dec1..9b6abc80fa 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -64,8 +64,8 @@ class MipsOperandGenerator final : public OperandGenerator {
bool CanBeImmediate(Node* node, InstructionCode opcode) {
Int32Matcher m(node);
- if (!m.HasValue()) return false;
- int32_t value = m.Value();
+ if (!m.HasResolvedValue()) return false;
+ int32_t value = m.ResolvedValue();
switch (ArchOpcodeField::decode(opcode)) {
case kMipsShl:
case kMipsSar:
@@ -292,35 +292,35 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
InstructionCode opcode = kArchNop;
switch (params.transformation) {
- case LoadTransformation::kS8x16LoadSplat:
- opcode = kMipsS8x16LoadSplat;
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kMipsS128Load8Splat;
break;
- case LoadTransformation::kS16x8LoadSplat:
- opcode = kMipsS16x8LoadSplat;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kMipsS128Load16Splat;
break;
- case LoadTransformation::kS32x4LoadSplat:
- opcode = kMipsS32x4LoadSplat;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kMipsS128Load32Splat;
break;
- case LoadTransformation::kS64x2LoadSplat:
- opcode = kMipsS64x2LoadSplat;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kMipsS128Load64Splat;
break;
- case LoadTransformation::kI16x8Load8x8S:
- opcode = kMipsI16x8Load8x8S;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kMipsS128Load8x8S;
break;
- case LoadTransformation::kI16x8Load8x8U:
- opcode = kMipsI16x8Load8x8U;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kMipsS128Load8x8U;
break;
- case LoadTransformation::kI32x4Load16x4S:
- opcode = kMipsI32x4Load16x4S;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kMipsS128Load16x4S;
break;
- case LoadTransformation::kI32x4Load16x4U:
- opcode = kMipsI32x4Load16x4U;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kMipsS128Load16x4U;
break;
- case LoadTransformation::kI64x2Load32x2S:
- opcode = kMipsI64x2Load32x2S;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kMipsS128Load32x2S;
break;
- case LoadTransformation::kI64x2Load32x2U:
- opcode = kMipsI64x2Load32x2U;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kMipsS128Load32x2U;
break;
default:
UNIMPLEMENTED();
@@ -431,7 +431,7 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kMipsSwc1;
@@ -460,7 +460,6 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (g.CanBeImmediate(index, opcode)) {
@@ -487,8 +486,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
- m.right().HasValue()) {
- uint32_t mask = m.right().Value();
+ m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
@@ -498,9 +497,9 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Select Ext for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1F;
+ uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
// Ext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -520,8 +519,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Other cases fall through to the normal And operation.
}
}
- if (m.right().HasValue()) {
- uint32_t mask = m.right().Value();
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
uint32_t shift = base::bits::CountPopulation(~mask);
uint32_t msb = base::bits::CountLeadingZeros32(~mask);
if (shift != 0 && shift != 32 && msb + shift == 32) {
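The mask_width/mask_msb test in VisitWord32And above accepts a mask only when its set bits are contiguous and start at bit 0, which is the shape the MIPS Ext instruction can extract. A self-contained sketch of that bit test, assuming C++20 <bit>; the helper name is illustrative.

#include <bit>
#include <cstdint>

// True when the set bits of |mask| are contiguous and reach down to bit 0.
bool IsContiguousLowMask(uint32_t mask, unsigned* width) {
  unsigned pop = static_cast<unsigned>(std::popcount(mask));
  unsigned lz = static_cast<unsigned>(std::countl_zero(mask));
  // Leading zeros + population count == 32 means no holes and no trailing
  // zeros: 0x0000ffff qualifies, 0xfff0 and 0x00ff00ff do not.
  if (pop == 0 || lz + pop != 32) return false;
  *width = pop;
  return true;
}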
@@ -544,7 +543,7 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
m.right().Is(-1)) {
Int32BinopMatcher mleft(m.left().node());
- if (!mleft.right().HasValue()) {
+ if (!mleft.right().HasResolvedValue()) {
MipsOperandGenerator g(this);
Emit(kMipsNor, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
@@ -570,12 +569,12 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32BinopMatcher mleft(m.left().node());
// Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
// contiguous, and the shift immediate non-zero.
- if (mleft.right().HasValue()) {
- uint32_t mask = mleft.right().Value();
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t mask = mleft.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
- uint32_t shift = m.right().Value();
+ uint32_t shift = m.right().ResolvedValue();
DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
DCHECK_NE(0u, shift);
if ((shift + mask_width) >= 32) {
@@ -594,13 +593,14 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
- if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1F;
+ if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && mleft.right().Value() != 0) {
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
- uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
@@ -621,10 +621,10 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
- if (m.right().HasValue() && mleft.right().HasValue()) {
+ if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
MipsOperandGenerator g(this);
- uint32_t sar = m.right().Value();
- uint32_t shl = mleft.right().Value();
+ uint32_t sar = m.right().ResolvedValue();
+ uint32_t shl = mleft.right().ResolvedValue();
if ((sar == shl) && (sar == 16)) {
Emit(kMipsSeh, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()));
@@ -685,7 +685,7 @@ static void VisitWord32PairShift(InstructionSelector* selector,
MipsOperandGenerator g(selector);
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
shift_operand = g.UseImmediate(m.node());
} else {
shift_operand = g.UseUniqueRegister(m.node());
@@ -869,8 +869,9 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
- if (mright.right().HasValue() && !m.left().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mright.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMipsLsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
@@ -885,8 +886,9 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && !m.right().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mleft.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMipsLsa, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
@@ -908,8 +910,8 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
void InstructionSelector::VisitInt32Mul(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
- if (m.right().HasValue() && m.right().Value() > 0) {
- uint32_t value = static_cast<uint32_t>(m.right().Value());
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
if (base::bits::IsPowerOfTwo(value)) {
Emit(kMipsShl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
@@ -1386,7 +1388,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
@@ -1439,7 +1441,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
// TODO(mips): I guess this could be done in a better way.
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kMipsUswc1;
@@ -1887,7 +1889,7 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode =
@@ -1923,7 +1925,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kWord32AtomicStoreWord8;
@@ -1957,7 +1959,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -1971,7 +1973,6 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_MRI;
@@ -1996,7 +1997,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* index = node->InputAt(1);
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -2010,7 +2011,6 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_MRI;
@@ -2037,7 +2037,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -2051,7 +2051,6 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
opcode = word32_op;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_MRI;
@@ -2195,13 +2194,14 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4GeU, kMipsI32x4GeU) \
V(I32x4Abs, kMipsI32x4Abs) \
V(I32x4BitMask, kMipsI32x4BitMask) \
+ V(I32x4DotI16x8S, kMipsI32x4DotI16x8S) \
V(I16x8Add, kMipsI16x8Add) \
- V(I16x8AddSaturateS, kMipsI16x8AddSaturateS) \
- V(I16x8AddSaturateU, kMipsI16x8AddSaturateU) \
+ V(I16x8AddSatS, kMipsI16x8AddSatS) \
+ V(I16x8AddSatU, kMipsI16x8AddSatU) \
V(I16x8AddHoriz, kMipsI16x8AddHoriz) \
V(I16x8Sub, kMipsI16x8Sub) \
- V(I16x8SubSaturateS, kMipsI16x8SubSaturateS) \
- V(I16x8SubSaturateU, kMipsI16x8SubSaturateU) \
+ V(I16x8SubSatS, kMipsI16x8SubSatS) \
+ V(I16x8SubSatU, kMipsI16x8SubSatU) \
V(I16x8Mul, kMipsI16x8Mul) \
V(I16x8MaxS, kMipsI16x8MaxS) \
V(I16x8MinS, kMipsI16x8MinS) \
@@ -2219,11 +2219,11 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8Abs, kMipsI16x8Abs) \
V(I16x8BitMask, kMipsI16x8BitMask) \
V(I8x16Add, kMipsI8x16Add) \
- V(I8x16AddSaturateS, kMipsI8x16AddSaturateS) \
- V(I8x16AddSaturateU, kMipsI8x16AddSaturateU) \
+ V(I8x16AddSatS, kMipsI8x16AddSatS) \
+ V(I8x16AddSatU, kMipsI8x16AddSatU) \
V(I8x16Sub, kMipsI8x16Sub) \
- V(I8x16SubSaturateS, kMipsI8x16SubSaturateS) \
- V(I8x16SubSaturateU, kMipsI8x16SubSaturateU) \
+ V(I8x16SubSatS, kMipsI8x16SubSatS) \
+ V(I8x16SubSatU, kMipsI8x16SubSatU) \
V(I8x16Mul, kMipsI8x16Mul) \
V(I8x16MaxS, kMipsI8x16MaxS) \
V(I8x16MinS, kMipsI8x16MinS) \
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index bb01eab924..887b7e5740 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -628,7 +628,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
__ Call(reg);
@@ -675,7 +675,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
__ Jump(reg);
@@ -701,7 +701,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Jump(reg);
frame_access_state()->ClearSPDelta();
@@ -847,8 +847,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
+ __ Branch(exit->label());
break;
}
case kArchRet:
@@ -1869,31 +1868,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
break;
}
- case kMips64S8x16LoadSplat: {
+ case kMips64S128Load8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ Lb(kScratchReg, i.MemoryOperand());
__ fill_b(i.OutputSimd128Register(), kScratchReg);
break;
}
- case kMips64S16x8LoadSplat: {
+ case kMips64S128Load16Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ Lh(kScratchReg, i.MemoryOperand());
__ fill_h(i.OutputSimd128Register(), kScratchReg);
break;
}
- case kMips64S32x4LoadSplat: {
+ case kMips64S128Load32Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ Lw(kScratchReg, i.MemoryOperand());
__ fill_w(i.OutputSimd128Register(), kScratchReg);
break;
}
- case kMips64S64x2LoadSplat: {
+ case kMips64S128Load64Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ Ld(kScratchReg, i.MemoryOperand());
__ fill_d(i.OutputSimd128Register(), kScratchReg);
break;
}
- case kMips64I16x8Load8x8S: {
+ case kMips64S128Load8x8S: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register scratch = kSimd128ScratchReg;
@@ -1903,7 +1902,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_b(dst, scratch, dst);
break;
}
- case kMips64I16x8Load8x8U: {
+ case kMips64S128Load8x8U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
@@ -1912,7 +1911,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_b(dst, kSimd128RegZero, dst);
break;
}
- case kMips64I32x4Load16x4S: {
+ case kMips64S128Load16x4S: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register scratch = kSimd128ScratchReg;
@@ -1922,7 +1921,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_h(dst, scratch, dst);
break;
}
- case kMips64I32x4Load16x4U: {
+ case kMips64S128Load16x4U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
@@ -1931,7 +1930,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_h(dst, kSimd128RegZero, dst);
break;
}
- case kMips64I64x2Load32x2S: {
+ case kMips64S128Load32x2S: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register scratch = kSimd128ScratchReg;
@@ -1941,7 +1940,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_w(dst, scratch, dst);
break;
}
- case kMips64I64x2Load32x2U: {
+ case kMips64S128Load32x2U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
@@ -1950,6 +1949,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_w(dst, kSimd128RegZero, dst);
break;
}
+ case kMips64S128Load32Zero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ xor_v(dst, dst, dst);
+ __ Lwu(kScratchReg, i.MemoryOperand());
+ __ insert_w(dst, 0, kScratchReg);
+ break;
+ }
+ case kMips64S128Load64Zero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ xor_v(dst, dst, dst);
+ __ Ld(kScratchReg, i.MemoryOperand());
+ __ insert_d(dst, 0, kScratchReg);
+ break;
+ }
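The new kMips64S128Load32Zero/kMips64S128Load64Zero cases load 32 or 64 bits into lane 0 of a cleared 128-bit register. A scalar model of that semantics, assuming a little-endian lane layout (lane 0 in the low bits) as used by V8 on MIPS64; illustrative only.

#include <cstdint>
#include <cstring>

struct Simd128 {
  uint64_t lo;
  uint64_t hi;
};

Simd128 S128Load32Zero(const void* mem) {
  uint32_t value;
  std::memcpy(&value, mem, sizeof(value));  // unaligned-safe load
  return {value, 0};  // low 32 bits hold the value, the other 96 are zero
}

Simd128 S128Load64Zero(const void* mem) {
  uint64_t value;
  std::memcpy(&value, mem, sizeof(value));
  return {value, 0};  // low 64 bits hold the value, the upper 64 are zero
}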
case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
@@ -2196,9 +2211,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register src1 = i.InputSimd128Register(1);
Simd128Register scratch0 = kSimd128RegZero;
Simd128Register scratch1 = kSimd128ScratchReg;
- // MSA follows IEEE 754-2008 comparision rules:
- // 1. All NaN-related comparsions get false.
- // 2. +0.0 equals to -0.0.
// If inputs are -0.0. and +0.0, then write -0.0 to scratch1.
// scratch1 = (src0 == src1) ? (src0 | src1) : (src1 | src1).
@@ -2208,9 +2220,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// scratch0 = isNaN(src0) ? src0 : scratch1.
__ fseq_d(scratch0, src0, src0);
__ bsel_v(scratch0, src0, scratch1);
- // dst = (src0 < scratch0) ? src0 : scratch0.
- __ fslt_d(dst, src0, scratch0);
- __ bsel_v(dst, scratch0, src0);
+ // scratch1 = (src0 < scratch0) ? src0 : scratch0.
+ __ fslt_d(scratch1, src0, scratch0);
+ __ bsel_v(scratch1, scratch0, src0);
+ // Canonicalize the result.
+ __ fmin_d(dst, scratch1, scratch1);
break;
}
case kMips64F64x2Max: {
@@ -2220,9 +2234,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register src1 = i.InputSimd128Register(1);
Simd128Register scratch0 = kSimd128RegZero;
Simd128Register scratch1 = kSimd128ScratchReg;
- // MSA follows IEEE 754-2008 comparision rules:
- // 1. All NaN-related comparsions get false.
- // 2. +0.0 equals to -0.0.
// If inputs are -0.0. and +0.0, then write +0.0 to scratch1.
// scratch1 = (src0 == src1) ? (src0 & src1) : (src1 & src1).
@@ -2232,9 +2243,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// scratch0 = isNaN(src0) ? src0 : scratch1.
__ fseq_d(scratch0, src0, src0);
__ bsel_v(scratch0, src0, scratch1);
- // dst = (scratch0 < src0) ? src0 : scratch0.
- __ fslt_d(dst, scratch0, src0);
- __ bsel_v(dst, scratch0, src0);
+ // scratch1 = (scratch0 < src0) ? src0 : scratch0.
+ __ fslt_d(scratch1, scratch0, src0);
+ __ bsel_v(scratch1, scratch0, src0);
+ // Canonicalize the result.
+ __ fmax_d(dst, scratch1, scratch1);
break;
}
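The extra fmin_d/fmax_d with both operands equal in the F64x2Min/F64x2Max cases above (and in the F32x4 cases below) appears to serve one purpose: it leaves ordinary numbers, including signed zeros, unchanged while replacing any NaN produced by the select sequence with a quiet NaN. A scalar analogue under that assumption:

#include <cmath>

double CanonicalizeNaN(double x) {
  // std::fmin(x, x) returns x for numbers and a quiet NaN when x is NaN,
  // mirroring the effect of fmin_d(dst, scratch1, scratch1) above.
  return std::fmin(x, x);
}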
case kMips64F64x2Eq: {
@@ -2590,9 +2603,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register src1 = i.InputSimd128Register(1);
Simd128Register scratch0 = kSimd128RegZero;
Simd128Register scratch1 = kSimd128ScratchReg;
- // MSA follows IEEE 754-2008 comparision rules:
- // 1. All NaN-related comparsions get false.
- // 2. +0.0 equals to -0.0.
// If inputs are -0.0. and +0.0, then write +0.0 to scratch1.
// scratch1 = (src0 == src1) ? (src0 & src1) : (src1 & src1).
@@ -2602,9 +2612,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// scratch0 = isNaN(src0) ? src0 : scratch1.
__ fseq_w(scratch0, src0, src0);
__ bsel_v(scratch0, src0, scratch1);
- // dst = (scratch0 < src0) ? src0 : scratch0.
- __ fslt_w(dst, scratch0, src0);
- __ bsel_v(dst, scratch0, src0);
+ // scratch1 = (scratch0 < src0) ? src0 : scratch0.
+ __ fslt_w(scratch1, scratch0, src0);
+ __ bsel_v(scratch1, scratch0, src0);
+ // Canonicalize the result.
+ __ fmax_w(dst, scratch1, scratch1);
break;
}
case kMips64F32x4Min: {
@@ -2614,9 +2626,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register src1 = i.InputSimd128Register(1);
Simd128Register scratch0 = kSimd128RegZero;
Simd128Register scratch1 = kSimd128ScratchReg;
- // MSA follows IEEE 754-2008 comparision rules:
- // 1. All NaN-related comparsions get false.
- // 2. +0.0 equals to -0.0.
// If inputs are -0.0. and +0.0, then write -0.0 to scratch1.
// scratch1 = (src0 == src1) ? (src0 | src1) : (src1 | src1).
@@ -2626,9 +2635,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// scratch0 = isNaN(src0) ? src0 : scratch1.
__ fseq_w(scratch0, src0, src0);
__ bsel_v(scratch0, src0, scratch1);
- // dst = (src0 < scratch0) ? src0 : scratch0.
- __ fslt_w(dst, src0, scratch0);
- __ bsel_v(dst, scratch0, src0);
+ // scratch1 = (src0 < scratch0) ? src0 : scratch0.
+ __ fslt_w(scratch1, src0, scratch0);
+ __ bsel_v(scratch1, scratch0, src0);
+ // Canonicalize the result.
+ __ fmin_w(dst, scratch1, scratch1);
break;
}
case kMips64F32x4Eq: {
@@ -2767,6 +2778,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ copy_u_b(dst, scratch0, 0);
break;
}
+ case kMips64I32x4DotI16x8S: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ dotp_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
case kMips64I16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2843,7 +2860,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMips64I16x8AddSaturateS: {
+ case kMips64I16x8AddSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2855,7 +2872,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMips64I16x8SubSaturateS: {
+ case kMips64I16x8SubSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2904,13 +2921,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kMips64I16x8AddSaturateU: {
+ case kMips64I16x8AddSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMips64I16x8SubSaturateU: {
+ case kMips64I16x8SubSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -3034,7 +3051,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMips64I8x16AddSaturateS: {
+ case kMips64I8x16AddSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -3046,7 +3063,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMips64I8x16SubSaturateS: {
+ case kMips64I8x16SubSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -3107,13 +3124,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kMips64I8x16AddSaturateU: {
+ case kMips64I8x16AddSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMips64I8x16SubSaturateU: {
+ case kMips64I8x16SubSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -4297,7 +4314,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
const int returns = frame()->GetReturnSlotCount();
@@ -4318,41 +4335,81 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
MipsOperandConverter g(this, nullptr);
+
+ const int parameter_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
+
+  // {additional_pop_count} is only greater than zero if {parameter_count == 0}.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_count != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (__ emit_debug_code()) {
+ __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
+ g.ToRegister(additional_pop_count),
+ Operand(static_cast<int64_t>(0)));
+ }
+ }
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // Functions with JS linkage have at least one parameter (the receiver).
+ // If {parameter_count} == 0, it means it is a builtin with
+ // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
+ // itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_count != 0;
+#else
+ const bool drop_jsargs = false;
+#endif
+
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now unless they have a variable
// number of stack slot pops.
- if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
if (return_label_.is_bound()) {
__ Branch(&return_label_);
return;
} else {
__ bind(&return_label_);
- AssembleDeconstructFrame();
}
- } else {
- AssembleDeconstructFrame();
}
+ if (drop_jsargs) {
+ // Get the actual argument count
+ __ Ld(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
}
- int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
- if (pop->IsImmediate()) {
- pop_count += g.ToConstant(pop).ToInt32();
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ __ Daddu(t0, t0, Operand(1)); // Also pop the receiver.
+ if (parameter_count > 1) {
+ __ li(kScratchReg, parameter_count);
+ __ slt(kScratchReg2, t0, kScratchReg);
+ __ movn(t0, kScratchReg, kScratchReg2);
+ }
+ __ dsll(t0, t0, kSystemPointerSizeLog2);
+ __ Daddu(sp, sp, t0);
+ } else if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ __ Drop(parameter_count + additional_count);
} else {
- Register pop_reg = g.ToRegister(pop);
+ Register pop_reg = g.ToRegister(additional_pop_count);
+ __ Drop(parameter_count);
__ dsll(pop_reg, pop_reg, kSystemPointerSizeLog2);
__ Daddu(sp, sp, pop_reg);
}
- if (pop_count != 0) {
- __ DropAndRet(pop_count);
- } else {
- __ Ret();
- }
+ __ Ret();
}
void CodeGenerator::FinishCode() {}
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 577db6347c..18a8e616e7 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -252,6 +252,7 @@ namespace compiler {
V(Mips64I32x4GeU) \
V(Mips64I32x4Abs) \
V(Mips64I32x4BitMask) \
+ V(Mips64I32x4DotI16x8S) \
V(Mips64I16x8Splat) \
V(Mips64I16x8ExtractLaneU) \
V(Mips64I16x8ExtractLaneS) \
@@ -261,10 +262,10 @@ namespace compiler {
V(Mips64I16x8ShrS) \
V(Mips64I16x8ShrU) \
V(Mips64I16x8Add) \
- V(Mips64I16x8AddSaturateS) \
+ V(Mips64I16x8AddSatS) \
V(Mips64I16x8AddHoriz) \
V(Mips64I16x8Sub) \
- V(Mips64I16x8SubSaturateS) \
+ V(Mips64I16x8SubSatS) \
V(Mips64I16x8Mul) \
V(Mips64I16x8MaxS) \
V(Mips64I16x8MinS) \
@@ -272,8 +273,8 @@ namespace compiler {
V(Mips64I16x8Ne) \
V(Mips64I16x8GtS) \
V(Mips64I16x8GeS) \
- V(Mips64I16x8AddSaturateU) \
- V(Mips64I16x8SubSaturateU) \
+ V(Mips64I16x8AddSatU) \
+ V(Mips64I16x8SubSatU) \
V(Mips64I16x8MaxU) \
V(Mips64I16x8MinU) \
V(Mips64I16x8GtU) \
@@ -289,9 +290,9 @@ namespace compiler {
V(Mips64I8x16Shl) \
V(Mips64I8x16ShrS) \
V(Mips64I8x16Add) \
- V(Mips64I8x16AddSaturateS) \
+ V(Mips64I8x16AddSatS) \
V(Mips64I8x16Sub) \
- V(Mips64I8x16SubSaturateS) \
+ V(Mips64I8x16SubSatS) \
V(Mips64I8x16Mul) \
V(Mips64I8x16MaxS) \
V(Mips64I8x16MinS) \
@@ -300,8 +301,8 @@ namespace compiler {
V(Mips64I8x16GtS) \
V(Mips64I8x16GeS) \
V(Mips64I8x16ShrU) \
- V(Mips64I8x16AddSaturateU) \
- V(Mips64I8x16SubSaturateU) \
+ V(Mips64I8x16AddSatU) \
+ V(Mips64I8x16SubSatU) \
V(Mips64I8x16MaxU) \
V(Mips64I8x16MinU) \
V(Mips64I8x16GtU) \
@@ -348,16 +349,18 @@ namespace compiler {
V(Mips64S8x8Reverse) \
V(Mips64S8x4Reverse) \
V(Mips64S8x2Reverse) \
- V(Mips64S8x16LoadSplat) \
- V(Mips64S16x8LoadSplat) \
- V(Mips64S32x4LoadSplat) \
- V(Mips64S64x2LoadSplat) \
- V(Mips64I16x8Load8x8S) \
- V(Mips64I16x8Load8x8U) \
- V(Mips64I32x4Load16x4S) \
- V(Mips64I32x4Load16x4U) \
- V(Mips64I64x2Load32x2S) \
- V(Mips64I64x2Load32x2U) \
+ V(Mips64S128Load8Splat) \
+ V(Mips64S128Load16Splat) \
+ V(Mips64S128Load32Splat) \
+ V(Mips64S128Load64Splat) \
+ V(Mips64S128Load8x8S) \
+ V(Mips64S128Load8x8U) \
+ V(Mips64S128Load16x4S) \
+ V(Mips64S128Load16x4U) \
+ V(Mips64S128Load32x2S) \
+ V(Mips64S128Load32x2U) \
+ V(Mips64S128Load32Zero) \
+ V(Mips64S128Load64Zero) \
V(Mips64MsaLd) \
V(Mips64MsaSt) \
V(Mips64I32x4SConvertI16x8Low) \
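The AddSaturate/SubSaturate to AddSat/SubSat renames in this opcode list follow the shortened WebAssembly SIMD mnemonics (i16x8.add_sat_s and friends); the behaviour is unchanged: lane-wise addition or subtraction clamped to the lane's range instead of wrapping. A one-lane sketch of the signed 16-bit case, assuming nothing beyond standard C++:

  #include <algorithm>
  #include <cstdint>

  // Sketch of i16x8.add_sat_s for a single lane: widen, add, clamp.
  int16_t AddSatS16(int16_t a, int16_t b) {
    int32_t sum = int32_t{a} + int32_t{b};  // cannot overflow in 32 bits
    sum = std::clamp(sum, int32_t{INT16_MIN}, int32_t{INT16_MAX});
    return static_cast<int16_t>(sum);
  }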
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index caf472bf30..0cbaf0cc47 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -149,8 +149,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64FloorWS:
case kMips64I16x8Add:
case kMips64I16x8AddHoriz:
- case kMips64I16x8AddSaturateS:
- case kMips64I16x8AddSaturateU:
+ case kMips64I16x8AddSatS:
+ case kMips64I16x8AddSatU:
case kMips64I16x8Eq:
case kMips64I16x8ExtractLaneU:
case kMips64I16x8ExtractLaneS:
@@ -175,8 +175,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I16x8ShrU:
case kMips64I16x8Splat:
case kMips64I16x8Sub:
- case kMips64I16x8SubSaturateS:
- case kMips64I16x8SubSaturateU:
+ case kMips64I16x8SubSatS:
+ case kMips64I16x8SubSatU:
case kMips64I8x16UConvertI16x8:
case kMips64I16x8UConvertI32x4:
case kMips64I16x8UConvertI8x16High:
@@ -213,9 +213,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I32x4UConvertI16x8Low:
case kMips64I32x4Abs:
case kMips64I32x4BitMask:
+ case kMips64I32x4DotI16x8S:
case kMips64I8x16Add:
- case kMips64I8x16AddSaturateS:
- case kMips64I8x16AddSaturateU:
+ case kMips64I8x16AddSatS:
+ case kMips64I8x16AddSatU:
case kMips64I8x16Eq:
case kMips64I8x16ExtractLaneU:
case kMips64I8x16ExtractLaneS:
@@ -236,8 +237,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I8x16ShrU:
case kMips64I8x16Splat:
case kMips64I8x16Sub:
- case kMips64I8x16SubSaturateS:
- case kMips64I8x16SubSaturateU:
+ case kMips64I8x16SubSatS:
+ case kMips64I8x16SubSatU:
case kMips64I8x16RoundingAverageU:
case kMips64I8x16Abs:
case kMips64I8x16BitMask:
@@ -348,16 +349,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Ulw:
case kMips64Ulwu:
case kMips64Ulwc1:
- case kMips64S8x16LoadSplat:
- case kMips64S16x8LoadSplat:
- case kMips64S32x4LoadSplat:
- case kMips64S64x2LoadSplat:
- case kMips64I16x8Load8x8S:
- case kMips64I16x8Load8x8U:
- case kMips64I32x4Load16x4S:
- case kMips64I32x4Load16x4U:
- case kMips64I64x2Load32x2S:
- case kMips64I64x2Load32x2U:
+ case kMips64S128Load8Splat:
+ case kMips64S128Load16Splat:
+ case kMips64S128Load32Splat:
+ case kMips64S128Load64Splat:
+ case kMips64S128Load8x8S:
+ case kMips64S128Load8x8U:
+ case kMips64S128Load16x4S:
+ case kMips64S128Load16x4U:
+ case kMips64S128Load32x2S:
+ case kMips64S128Load32x2U:
+ case kMips64S128Load32Zero:
+ case kMips64S128Load64Zero:
case kMips64Word64AtomicLoadUint8:
case kMips64Word64AtomicLoadUint16:
case kMips64Word64AtomicLoadUint32:
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 2c807b4183..216b83cdb2 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -386,35 +386,41 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
InstructionCode opcode = kArchNop;
switch (params.transformation) {
- case LoadTransformation::kS8x16LoadSplat:
- opcode = kMips64S8x16LoadSplat;
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kMips64S128Load8Splat;
break;
- case LoadTransformation::kS16x8LoadSplat:
- opcode = kMips64S16x8LoadSplat;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kMips64S128Load16Splat;
break;
- case LoadTransformation::kS32x4LoadSplat:
- opcode = kMips64S32x4LoadSplat;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kMips64S128Load32Splat;
break;
- case LoadTransformation::kS64x2LoadSplat:
- opcode = kMips64S64x2LoadSplat;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kMips64S128Load64Splat;
break;
- case LoadTransformation::kI16x8Load8x8S:
- opcode = kMips64I16x8Load8x8S;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kMips64S128Load8x8S;
break;
- case LoadTransformation::kI16x8Load8x8U:
- opcode = kMips64I16x8Load8x8U;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kMips64S128Load8x8U;
break;
- case LoadTransformation::kI32x4Load16x4S:
- opcode = kMips64I32x4Load16x4S;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kMips64S128Load16x4S;
break;
- case LoadTransformation::kI32x4Load16x4U:
- opcode = kMips64I32x4Load16x4U;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kMips64S128Load16x4U;
break;
- case LoadTransformation::kI64x2Load32x2S:
- opcode = kMips64I64x2Load32x2S;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kMips64S128Load32x2S;
break;
- case LoadTransformation::kI64x2Load32x2U:
- opcode = kMips64I64x2Load32x2U;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kMips64S128Load32x2U;
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kMips64S128Load32Zero;
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kMips64S128Load64Zero;
break;
default:
UNIMPLEMENTED();
@@ -504,7 +510,7 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kMips64Swc1;
@@ -535,7 +541,6 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (g.CanBeImmediate(index, opcode)) {
@@ -562,8 +567,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
- m.right().HasValue()) {
- uint32_t mask = m.right().Value();
+ m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
@@ -573,9 +578,9 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Select Ext for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1F;
+ uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
// Ext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -591,8 +596,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Other cases fall through to the normal And operation.
}
}
- if (m.right().HasValue()) {
- uint32_t mask = m.right().Value();
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
uint32_t shift = base::bits::CountPopulation(~mask);
uint32_t msb = base::bits::CountLeadingZeros32(~mask);
if (shift != 0 && shift != 32 && msb + shift == 32) {
@@ -611,8 +616,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
- m.right().HasValue()) {
- uint64_t mask = m.right().Value();
+ m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
@@ -622,9 +627,10 @@ void InstructionSelector::VisitWord64And(Node* node) {
// Select Dext for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int64 shifts use `value % 64`.
- uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
+ uint32_t lsb =
+ static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);
// Dext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -644,8 +650,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
// Other cases fall through to the normal And operation.
}
}
- if (m.right().HasValue()) {
- uint64_t mask = m.right().Value();
+ if (m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
uint32_t shift = base::bits::CountPopulation(~mask);
uint32_t msb = base::bits::CountLeadingZeros64(~mask);
if (shift != 0 && shift < 32 && msb + shift == 64) {
@@ -674,7 +680,7 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
m.right().Is(-1)) {
Int32BinopMatcher mleft(m.left().node());
- if (!mleft.right().HasValue()) {
+ if (!mleft.right().HasResolvedValue()) {
Mips64OperandGenerator g(this);
Emit(kMips64Nor32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
@@ -697,7 +703,7 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
m.right().Is(-1)) {
Int64BinopMatcher mleft(m.left().node());
- if (!mleft.right().HasValue()) {
+ if (!mleft.right().HasResolvedValue()) {
Mips64OperandGenerator g(this);
Emit(kMips64Nor, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
@@ -723,12 +729,12 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32BinopMatcher mleft(m.left().node());
// Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
// contiguous, and the shift immediate non-zero.
- if (mleft.right().HasValue()) {
- uint32_t mask = mleft.right().Value();
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t mask = mleft.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
- uint32_t shift = m.right().Value();
+ uint32_t shift = m.right().ResolvedValue();
DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
DCHECK_NE(0u, shift);
if ((shift + mask_width) >= 32) {
@@ -747,13 +753,14 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
- if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1F;
+ if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && mleft.right().Value() != 0) {
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
- uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
@@ -773,10 +780,10 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
- if (m.right().HasValue() && mleft.right().HasValue()) {
+ if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
Mips64OperandGenerator g(this);
- uint32_t sar = m.right().Value();
- uint32_t shl = mleft.right().Value();
+ uint32_t sar = m.right().ResolvedValue();
+ uint32_t shl = mleft.right().ResolvedValue();
if ((sar == shl) && (sar == 16)) {
Emit(kMips64Seh, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()));
@@ -812,12 +819,12 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
// Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
// contiguous, and the shift immediate non-zero.
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
- uint64_t mask = mleft.right().Value();
+ if (mleft.right().HasResolvedValue()) {
+ uint64_t mask = mleft.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
- uint64_t shift = m.right().Value();
+ uint64_t shift = m.right().ResolvedValue();
DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
DCHECK_NE(0u, shift);
@@ -837,13 +844,14 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
- if (m.left().IsWord64And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x3F;
+ if (m.left().IsWord64And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x3F;
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && mleft.right().Value() != 0) {
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
// Select Dext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
- uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
+ uint64_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_msb + mask_width + lsb) == 64) {
@@ -935,8 +943,9 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
- if (mright.right().HasValue() && !m.left().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mright.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMips64Lsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
@@ -951,8 +960,9 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && !m.right().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mleft.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMips64Lsa, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
@@ -976,8 +986,9 @@ void InstructionSelector::VisitInt64Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord64Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int64BinopMatcher mright(m.right().node());
- if (mright.right().HasValue() && !m.left().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mright.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMips64Dlsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
@@ -992,8 +1003,9 @@ void InstructionSelector::VisitInt64Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord64Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && !m.right().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mleft.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMips64Dlsa, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
@@ -1019,8 +1031,8 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
void InstructionSelector::VisitInt32Mul(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
- if (m.right().HasValue() && m.right().Value() > 0) {
- uint32_t value = static_cast<uint32_t>(m.right().Value());
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
if (base::bits::IsPowerOfTwo(value)) {
Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
@@ -1074,8 +1086,8 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
// TODO(dusmil): Add optimization for shifts larger than 32.
- if (m.right().HasValue() && m.right().Value() > 0) {
- uint32_t value = static_cast<uint32_t>(m.right().Value());
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
if (base::bits::IsPowerOfTwo(value)) {
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
@@ -1389,7 +1401,6 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
EmitLoad(this, value, opcode, node);
} else {
@@ -1746,7 +1757,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kMips64Ulwc1;
@@ -1799,7 +1810,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
Node* value = node->InputAt(2);
UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kMips64Uswc1;
@@ -2532,7 +2543,7 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode =
@@ -2553,7 +2564,7 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kWord32AtomicStoreWord8;
@@ -2573,7 +2584,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode = kMips64Word64AtomicLoadUint8;
@@ -2595,7 +2606,7 @@ void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kMips64Word64AtomicStoreWord8;
@@ -2617,7 +2628,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -2631,14 +2642,13 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kMips64Word64AtomicExchangeUint8;
@@ -2650,13 +2660,12 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
opcode = kMips64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -2670,14 +2679,13 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kMips64Word64AtomicCompareExchangeUint8;
@@ -2689,14 +2697,13 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
opcode = kMips64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -2710,7 +2717,6 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
opcode = word32_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
@@ -2733,7 +2739,7 @@ VISIT_ATOMIC_BINOP(Xor)
void InstructionSelector::VisitWord64AtomicBinaryOperation(
Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
ArchOpcode uint64_op) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = uint8_op;
@@ -2745,7 +2751,6 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
opcode = uint64_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
}
@@ -2879,13 +2884,14 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4GeS, kMips64I32x4GeS) \
V(I32x4GtU, kMips64I32x4GtU) \
V(I32x4GeU, kMips64I32x4GeU) \
+ V(I32x4DotI16x8S, kMips64I32x4DotI16x8S) \
V(I16x8Add, kMips64I16x8Add) \
- V(I16x8AddSaturateS, kMips64I16x8AddSaturateS) \
- V(I16x8AddSaturateU, kMips64I16x8AddSaturateU) \
+ V(I16x8AddSatS, kMips64I16x8AddSatS) \
+ V(I16x8AddSatU, kMips64I16x8AddSatU) \
V(I16x8AddHoriz, kMips64I16x8AddHoriz) \
V(I16x8Sub, kMips64I16x8Sub) \
- V(I16x8SubSaturateS, kMips64I16x8SubSaturateS) \
- V(I16x8SubSaturateU, kMips64I16x8SubSaturateU) \
+ V(I16x8SubSatS, kMips64I16x8SubSatS) \
+ V(I16x8SubSatU, kMips64I16x8SubSatU) \
V(I16x8Mul, kMips64I16x8Mul) \
V(I16x8MaxS, kMips64I16x8MaxS) \
V(I16x8MinS, kMips64I16x8MinS) \
@@ -2901,11 +2907,11 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8SConvertI32x4, kMips64I16x8SConvertI32x4) \
V(I16x8UConvertI32x4, kMips64I16x8UConvertI32x4) \
V(I8x16Add, kMips64I8x16Add) \
- V(I8x16AddSaturateS, kMips64I8x16AddSaturateS) \
- V(I8x16AddSaturateU, kMips64I8x16AddSaturateU) \
+ V(I8x16AddSatS, kMips64I8x16AddSatS) \
+ V(I8x16AddSatU, kMips64I8x16AddSatU) \
V(I8x16Sub, kMips64I8x16Sub) \
- V(I8x16SubSaturateS, kMips64I8x16SubSaturateS) \
- V(I8x16SubSaturateU, kMips64I8x16SubSaturateU) \
+ V(I8x16SubSatS, kMips64I8x16SubSatS) \
+ V(I8x16SubSatU, kMips64I8x16SubSatU) \
V(I8x16Mul, kMips64I8x16Mul) \
V(I8x16MaxS, kMips64I8x16MaxS) \
V(I8x16MinS, kMips64I8x16MinS) \
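Most of this file is the mechanical HasValue to HasResolvedValue rename, but the Ext/Dext matching it touches is worth spelling out: And(Shr(x, imm), mask) is folded into a single bit-field extract only when the mask is a non-empty contiguous run of ones anchored at bit 0, which is exactly what the popcount/leading-zeros test encodes. A standalone sketch of that predicate, using C++20 <bit> in place of V8's base::bits helpers:

  #include <bit>
  #include <cstdint>

  // Sketch: true iff mask == (1 << width) - 1 for some width in [1, 32],
  // the precondition for turning And(Shr(x, imm), mask) into one Ext.
  bool IsLowContiguousMask32(uint32_t mask) {
    unsigned width = std::popcount(mask);   // base::bits::CountPopulation
    unsigned msb = std::countl_zero(mask);  // base::bits::CountLeadingZeros32
    return width != 0 && msb + width == 32;
  }
  // e.g. IsLowContiguousMask32(0x00FF) is true, IsLowContiguousMask32(0x0FF0) is false.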
diff --git a/deps/v8/src/compiler/backend/move-optimizer.h b/deps/v8/src/compiler/backend/move-optimizer.h
index ac3c407393..a63bd52d73 100644
--- a/deps/v8/src/compiler/backend/move-optimizer.h
+++ b/deps/v8/src/compiler/backend/move-optimizer.h
@@ -16,6 +16,9 @@ namespace compiler {
class V8_EXPORT_PRIVATE MoveOptimizer final {
public:
MoveOptimizer(Zone* local_zone, InstructionSequence* code);
+ MoveOptimizer(const MoveOptimizer&) = delete;
+ MoveOptimizer& operator=(const MoveOptimizer&) = delete;
+
void Run();
private:
@@ -57,8 +60,6 @@ class V8_EXPORT_PRIVATE MoveOptimizer final {
// at any given time, so we create two buffers.
ZoneVector<InstructionOperand> operand_buffer1;
ZoneVector<InstructionOperand> operand_buffer2;
-
- DISALLOW_COPY_AND_ASSIGN(MoveOptimizer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/backend/ppc/OWNERS b/deps/v8/src/compiler/backend/ppc/OWNERS
index 6edd45a6ef..02c2cd757c 100644
--- a/deps/v8/src/compiler/backend/ppc/OWNERS
+++ b/deps/v8/src/compiler/backend/ppc/OWNERS
@@ -2,3 +2,4 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
+vasili.skurydzin@ibm.com
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 767247b2fd..ee1ef6d939 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -877,7 +877,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ CallCodeObject(reg);
} else {
@@ -925,7 +925,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ JumpCodeObject(reg);
} else {
@@ -962,7 +962,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Jump(reg);
frame_access_state()->ClearSPDelta();
@@ -1131,8 +1131,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
+ __ b(exit->label());
break;
}
case kArchRet:
@@ -1929,21 +1928,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
i.OutputRegister(0), kScratchDoubleReg);
#if V8_TARGET_ARCH_PPC64
- if (check_conversion) {
- // Set 2nd output to zero if conversion fails.
CRegister cr = cr7;
int crbit = v8::internal::Assembler::encode_crbit(
cr, static_cast<CRBit>(VXCVI % CRWIDTH));
__ mcrfs(cr, VXCVI); // extract FPSCR field containing VXCVI into cr7
+ // Handle conversion failures (such as overflow).
if (CpuFeatures::IsSupported(ISELECT)) {
- __ li(i.OutputRegister(1), Operand(1));
- __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
+ if (check_conversion) {
+ __ li(i.OutputRegister(1), Operand(1));
+ __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
+ } else {
+ __ isel(i.OutputRegister(0), r0, i.OutputRegister(0), crbit);
+ }
} else {
- __ li(i.OutputRegister(1), Operand::Zero());
- __ bc(v8::internal::kInstrSize * 2, BT, crbit);
- __ li(i.OutputRegister(1), Operand(1));
+ if (check_conversion) {
+ __ li(i.OutputRegister(1), Operand::Zero());
+ __ bc(v8::internal::kInstrSize * 2, BT, crbit);
+ __ li(i.OutputRegister(1), Operand(1));
+ } else {
+ __ mr(ip, i.OutputRegister(0));
+ __ li(i.OutputRegister(0), Operand::Zero());
+ __ bc(v8::internal::kInstrSize * 2, BT, crbit);
+ __ mr(i.OutputRegister(0), ip);
+ }
}
- }
#endif
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
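The restructured VXCVI handling above serves two cases: when the instruction has a second output (check_conversion), that output becomes a 0/1 success flag; otherwise the converted value itself is squashed to zero on an invalid conversion, using isel where ISELECT is available and a short branch sequence otherwise. A minimal sketch of the intended result, with the FPSCR VXCVI bit modelled as a bool (names are illustrative, not V8 API):

  #include <cstdint>

  struct ConversionOutputs {
    int64_t value;
    int64_t ok;  // only meaningful when check_conversion is true
  };

  // Sketch of what the isel/branch sequences above compute.
  ConversionOutputs ApplyVxcvi(int64_t converted, bool vxcvi_set,
                               bool check_conversion) {
    if (check_conversion) {
      return {converted, vxcvi_set ? 0 : 1};  // report failure in output 1
    }
    return {vxcvi_set ? 0 : converted, 0};    // zero the result on failure
  }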
@@ -2270,189 +2278,118 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vspltb(dst, dst, Operand(7));
break;
}
-#define SHIFT_TO_CORRECT_LANE(starting_lane_nummber, lane_input, \
- lane_width_in_bytes, input_register) \
- int shift_bits = abs(lane_input - starting_lane_nummber) * \
- lane_width_in_bytes * kBitsPerByte; \
- if (shift_bits > 0) { \
- __ li(ip, Operand(shift_bits)); \
- __ mtvsrd(kScratchDoubleReg, ip); \
- __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7)); \
- if (lane_input < starting_lane_nummber) { \
- __ vsro(kScratchDoubleReg, input_register, kScratchDoubleReg); \
- } else { \
- DCHECK(lane_input > starting_lane_nummber); \
- __ vslo(kScratchDoubleReg, input_register, kScratchDoubleReg); \
- } \
- input_register = kScratchDoubleReg; \
- }
case kPPC_F64x2ExtractLane: {
- int32_t lane = 1 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(0, lane, 8, src);
- __ mfvsrd(kScratchReg, src);
+ constexpr int lane_width_in_bytes = 8;
+ __ vextractd(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(kScratchReg, kScratchDoubleReg);
__ MovInt64ToDouble(i.OutputDoubleRegister(), kScratchReg);
break;
}
case kPPC_F32x4ExtractLane: {
- int32_t lane = 3 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(1, lane, 4, src)
- __ mfvsrwz(kScratchReg, src);
+ constexpr int lane_width_in_bytes = 4;
+ __ vextractuw(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(kScratchReg, kScratchDoubleReg);
__ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg);
break;
}
case kPPC_I64x2ExtractLane: {
- int32_t lane = 1 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(0, lane, 8, src)
- __ mfvsrd(i.OutputRegister(), src);
+ constexpr int lane_width_in_bytes = 8;
+ __ vextractd(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
break;
}
case kPPC_I32x4ExtractLane: {
- int32_t lane = 3 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(1, lane, 4, src)
- __ mfvsrwz(i.OutputRegister(), src);
+ constexpr int lane_width_in_bytes = 4;
+ __ vextractuw(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
break;
}
case kPPC_I16x8ExtractLaneU: {
- int32_t lane = 7 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(2, lane, 2, src)
- __ mfvsrwz(r0, src);
- __ li(ip, Operand(16));
- __ srd(i.OutputRegister(), r0, ip);
+ constexpr int lane_width_in_bytes = 2;
+ __ vextractuh(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((7 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
break;
}
case kPPC_I16x8ExtractLaneS: {
- int32_t lane = 7 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(2, lane, 2, src)
- __ mfvsrwz(kScratchReg, src);
- __ sradi(i.OutputRegister(), kScratchReg, 16);
+ constexpr int lane_width_in_bytes = 2;
+ __ vextractuh(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((7 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(kScratchReg, kScratchDoubleReg);
+ __ extsh(i.OutputRegister(), kScratchReg);
break;
}
case kPPC_I8x16ExtractLaneU: {
- int32_t lane = 15 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(4, lane, 1, src)
- __ mfvsrwz(r0, src);
- __ li(ip, Operand(24));
- __ srd(i.OutputRegister(), r0, ip);
+ __ vextractub(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand(15 - i.InputInt8(1)));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
break;
}
case kPPC_I8x16ExtractLaneS: {
- int32_t lane = 15 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(4, lane, 1, src)
- __ mfvsrwz(kScratchReg, src);
- __ sradi(i.OutputRegister(), kScratchReg, 24);
- break;
- }
-#undef SHIFT_TO_CORRECT_LANE
-#define GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane, \
- lane_width_in_bytes) \
- uint64_t mask = 0; \
- for (int i = 0, j = 0; i <= kSimd128Size - 1; i++) { \
- mask <<= kBitsPerByte; \
- if (i >= lane * lane_width_in_bytes && \
- i < lane * lane_width_in_bytes + lane_width_in_bytes) { \
- mask |= replacement_value_byte_lane + j; \
- j++; \
- } else { \
- mask |= i; \
- } \
- if (i == (kSimd128Size / 2) - 1) { \
- __ mov(r0, Operand(mask)); \
- mask = 0; \
- } else if (i >= kSimd128Size - 1) { \
- __ mov(ip, Operand(mask)); \
- } \
- } \
- /* Need to maintain 16 byte alignment for lvx */ \
- __ mr(kScratchReg, sp); \
- __ ClearRightImm(sp, sp, Operand(base::bits::WhichPowerOfTwo(16))); \
- __ addi(sp, sp, Operand(-16)); \
- __ StoreP(ip, MemOperand(sp, 0)); \
- __ StoreP(r0, MemOperand(sp, 8)); \
- __ lvx(kScratchDoubleReg, MemOperand(r0, sp)); \
- __ mr(sp, kScratchReg);
+ __ vextractub(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand(15 - i.InputInt8(1)));
+ __ mfvsrd(kScratchReg, kScratchDoubleReg);
+ __ extsb(i.OutputRegister(), kScratchReg);
+ break;
+ }
case kPPC_F64x2ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 1 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 16;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 8;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
+ Simd128Register dst = i.OutputSimd128Register();
__ MovDoubleToInt64(r0, i.InputDoubleRegister(2));
- __ mtvsrd(dst, r0);
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ __ mtvsrd(kScratchDoubleReg, r0);
+ __ vinsertd(dst, kScratchDoubleReg,
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_F32x4ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 3 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 20;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 4;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(2));
- __ mtvsrd(dst, kScratchReg);
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ MovFloatToInt(r0, i.InputDoubleRegister(2));
+ __ mtvsrd(kScratchDoubleReg, r0);
+ __ vinsertw(dst, kScratchDoubleReg,
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_I64x2ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 1 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 16;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 8;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ mtvsrd(dst, i.InputRegister(2));
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
+ __ vinsertd(dst, kScratchDoubleReg,
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_I32x4ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 3 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 20;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 4;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ mtvsrd(dst, i.InputRegister(2));
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
+ __ vinsertw(dst, kScratchDoubleReg,
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_I16x8ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 7 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 22;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 2;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ mtvsrd(dst, i.InputRegister(2));
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
+ __ vinserth(dst, kScratchDoubleReg,
+ Operand((7 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_I8x16ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 15 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 23;
- constexpr int lane_width_in_bytes = 1;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ mtvsrd(dst, i.InputRegister(2));
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
+ __ vinsertb(dst, kScratchDoubleReg, Operand(15 - i.InputInt8(1)));
break;
}
-#undef GENERATE_REPLACE_LANE_MASK
case kPPC_F64x2Add: {
__ xvadddp(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -3248,51 +3185,58 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vperm(dst, src0, src1, kScratchDoubleReg);
break;
}
- case kPPC_I16x8AddSaturateS: {
+ case kPPC_I16x8AddSatS: {
__ vaddshs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I16x8SubSaturateS: {
+ case kPPC_I16x8SubSatS: {
__ vsubshs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I16x8AddSaturateU: {
+ case kPPC_I16x8AddSatU: {
__ vadduhs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I16x8SubSaturateU: {
+ case kPPC_I16x8SubSatU: {
__ vsubuhs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I8x16AddSaturateS: {
+ case kPPC_I8x16AddSatS: {
__ vaddsbs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I8x16SubSaturateS: {
+ case kPPC_I8x16SubSatS: {
__ vsubsbs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I8x16AddSaturateU: {
+ case kPPC_I8x16AddSatU: {
__ vaddubs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I8x16SubSaturateU: {
+ case kPPC_I8x16SubSatU: {
__ vsububs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kPPC_I8x16Swizzle: {
- // Reverse the input to match IBM lane numbering.
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1),
+ tempFPReg1 = i.ToSimd128Register(instr->TempAt(0)),
+ tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
+ // Saturate the indices to 5 bits. Input indices more than 31 should
+ // return 0.
+ __ xxspltib(tempFPReg2, Operand(31));
+ __ vminub(tempFPReg2, src1, tempFPReg2);
__ addi(sp, sp, Operand(-16));
- __ stxvd(i.InputSimd128Register(0), MemOperand(r0, sp));
+ __ stxvd(src0, MemOperand(r0, sp));
__ ldbrx(r0, MemOperand(r0, sp));
__ li(ip, Operand(8));
__ ldbrx(ip, MemOperand(ip, sp));
@@ -3302,8 +3246,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lxvd(kScratchDoubleReg, MemOperand(r0, sp));
__ addi(sp, sp, Operand(16));
__ vxor(tempFPReg1, tempFPReg1, tempFPReg1);
- __ vperm(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
- i.InputSimd128Register(1));
+ __ vperm(dst, kScratchDoubleReg, tempFPReg1, tempFPReg2);
break;
}
case kPPC_F64x2Qfma: {
@@ -3438,6 +3381,42 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xvrspi(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kPPC_I32x4BitMask: {
+ __ mov(kScratchReg,
+ Operand(0x8080808000204060)); // Select 0 for the high bits.
+ __ mtvsrd(kScratchDoubleReg, kScratchReg);
+ __ vbpermq(kScratchDoubleReg, i.InputSimd128Register(0),
+ kScratchDoubleReg);
+ __ vextractub(kScratchDoubleReg, kScratchDoubleReg, Operand(6));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8BitMask: {
+ __ mov(kScratchReg, Operand(0x10203040506070));
+ __ mtvsrd(kScratchDoubleReg, kScratchReg);
+ __ vbpermq(kScratchDoubleReg, i.InputSimd128Register(0),
+ kScratchDoubleReg);
+ __ vextractub(kScratchDoubleReg, kScratchDoubleReg, Operand(6));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16BitMask: {
+ Register temp = i.ToRegister(instr->TempAt(0));
+ __ mov(temp, Operand(0x8101820283038));
+ __ mov(ip, Operand(0x4048505860687078));
+ __ mtvsrdd(kScratchDoubleReg, temp, ip);
+ __ vbpermq(kScratchDoubleReg, i.InputSimd128Register(0),
+ kScratchDoubleReg);
+ __ vextractuh(kScratchDoubleReg, kScratchDoubleReg, Operand(6));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I32x4DotI16x8S: {
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vmsumshm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
case kPPC_StoreCompressTagged: {
ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
break;
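kPPC_I32x4DotI16x8S above lowers the wasm i32x4.dot_i16x8_s operation to vmsumshm with a zeroed accumulator: each 32-bit output lane is the sum of the two products of the corresponding signed 16-bit lane pairs. A scalar sketch of those semantics in plain C++ (no V8 types):

  #include <array>
  #include <cstdint>

  // Sketch of i32x4.dot_i16x8_s: out[i] = a[2i]*b[2i] + a[2i+1]*b[2i+1].
  std::array<int32_t, 4> DotI16x8S(const std::array<int16_t, 8>& a,
                                   const std::array<int16_t, 8>& b) {
    std::array<int32_t, 4> out{};
    for (int i = 0; i < 4; ++i) {
      int64_t sum = int64_t{a[2 * i]} * b[2 * i] + int64_t{a[2 * i + 1]} * b[2 * i + 1];
      // Only the all-INT16_MIN case exceeds INT32_MAX; like the hardware, wrap it.
      out[i] = static_cast<int32_t>(static_cast<uint32_t>(sum));
    }
    return out;
  }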
@@ -3885,7 +3864,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
void CodeGenerator::FinishCode() {}
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {
+ // __ EmitConstantPool();
+}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
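A pattern repeated through the rewritten extract-lane and replace-lane cases above is the (N - 1 - lane) index: wasm numbers lanes from the least-significant end of the vector, while the vextract*/vinsert* instructions take a byte offset counted from the other end, so the lane index is mirrored and scaled by the lane width. A small sketch of that mapping (plain C++, illustrative name):

  // Sketch: the byte offset passed to vextract*/vinsert* in the cases above.
  //   F64x2/I64x2: (1 - lane) * 8    F32x4/I32x4: (3 - lane) * 4
  //   I16x8:       (7 - lane) * 2    I8x16:       (15 - lane) * 1
  int WasmLaneToPpcByteOffset(int lane, int lane_width_in_bytes) {
    int lane_count = 16 / lane_width_in_bytes;  // 128-bit vector
    return ((lane_count - 1) - lane) * lane_width_in_bytes;
  }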
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index fb5151ebd4..a4cda21d48 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -287,6 +287,8 @@ namespace compiler {
V(PPC_I32x4SConvertI16x8High) \
V(PPC_I32x4UConvertI16x8Low) \
V(PPC_I32x4UConvertI16x8High) \
+ V(PPC_I32x4BitMask) \
+ V(PPC_I32x4DotI16x8S) \
V(PPC_F32x4Qfma) \
V(PPC_F32x4Qfms) \
V(PPC_I16x8Splat) \
@@ -318,11 +320,12 @@ namespace compiler {
V(PPC_I16x8SConvertI8x16High) \
V(PPC_I16x8UConvertI8x16Low) \
V(PPC_I16x8UConvertI8x16High) \
- V(PPC_I16x8AddSaturateS) \
- V(PPC_I16x8SubSaturateS) \
- V(PPC_I16x8AddSaturateU) \
- V(PPC_I16x8SubSaturateU) \
+ V(PPC_I16x8AddSatS) \
+ V(PPC_I16x8SubSatS) \
+ V(PPC_I16x8AddSatU) \
+ V(PPC_I16x8SubSatU) \
V(PPC_I16x8RoundingAverageU) \
+ V(PPC_I16x8BitMask) \
V(PPC_I8x16Splat) \
V(PPC_I8x16ExtractLaneU) \
V(PPC_I8x16ExtractLaneS) \
@@ -347,13 +350,14 @@ namespace compiler {
V(PPC_I8x16Abs) \
V(PPC_I8x16SConvertI16x8) \
V(PPC_I8x16UConvertI16x8) \
- V(PPC_I8x16AddSaturateS) \
- V(PPC_I8x16SubSaturateS) \
- V(PPC_I8x16AddSaturateU) \
- V(PPC_I8x16SubSaturateU) \
+ V(PPC_I8x16AddSatS) \
+ V(PPC_I8x16SubSatS) \
+ V(PPC_I8x16AddSatU) \
+ V(PPC_I8x16SubSatU) \
V(PPC_I8x16RoundingAverageU) \
V(PPC_I8x16Shuffle) \
V(PPC_I8x16Swizzle) \
+ V(PPC_I8x16BitMask) \
V(PPC_V64x2AnyTrue) \
V(PPC_V32x4AnyTrue) \
V(PPC_V16x8AnyTrue) \
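Among the opcodes added to this list, PPC_I8x16BitMask, PPC_I16x8BitMask and PPC_I32x4BitMask back the wasm bitmask operations: gather the sign bit of every lane into the low bits of a scalar result. A scalar sketch of the i32x4 case in plain C++:

  #include <array>
  #include <cstdint>

  // Sketch of i32x4.bitmask: bit i of the result is the sign bit of lane i,
  // so the result fits in the low 4 bits.
  uint32_t I32x4BitMask(const std::array<int32_t, 4>& v) {
    uint32_t mask = 0;
    for (int i = 0; i < 4; ++i) {
      if (v[i] < 0) mask |= 1u << i;
    }
    return mask;
  }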
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index 8beaa8539c..87ea3f3219 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -212,6 +212,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I32x4SConvertI16x8High:
case kPPC_I32x4UConvertI16x8Low:
case kPPC_I32x4UConvertI16x8High:
+ case kPPC_I32x4BitMask:
+ case kPPC_I32x4DotI16x8S:
case kPPC_I16x8Splat:
case kPPC_I16x8ExtractLaneU:
case kPPC_I16x8ExtractLaneS:
@@ -241,11 +243,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I16x8SConvertI8x16High:
case kPPC_I16x8UConvertI8x16Low:
case kPPC_I16x8UConvertI8x16High:
- case kPPC_I16x8AddSaturateS:
- case kPPC_I16x8SubSaturateS:
- case kPPC_I16x8AddSaturateU:
- case kPPC_I16x8SubSaturateU:
+ case kPPC_I16x8AddSatS:
+ case kPPC_I16x8SubSatS:
+ case kPPC_I16x8AddSatU:
+ case kPPC_I16x8SubSatU:
case kPPC_I16x8RoundingAverageU:
+ case kPPC_I16x8BitMask:
case kPPC_I8x16Splat:
case kPPC_I8x16ExtractLaneU:
case kPPC_I8x16ExtractLaneS:
@@ -270,13 +273,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I8x16Abs:
case kPPC_I8x16SConvertI16x8:
case kPPC_I8x16UConvertI16x8:
- case kPPC_I8x16AddSaturateS:
- case kPPC_I8x16SubSaturateS:
- case kPPC_I8x16AddSaturateU:
- case kPPC_I8x16SubSaturateU:
+ case kPPC_I8x16AddSatS:
+ case kPPC_I8x16SubSatS:
+ case kPPC_I8x16AddSatU:
+ case kPPC_I8x16SubSatU:
case kPPC_I8x16RoundingAverageU:
case kPPC_I8x16Shuffle:
case kPPC_I8x16Swizzle:
+ case kPPC_I8x16BitMask:
case kPPC_V64x2AnyTrue:
case kPPC_V32x4AnyTrue:
case kPPC_V16x8AnyTrue:
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 0c61821cf5..9c66d6f733 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -314,7 +314,7 @@ void InstructionSelector::VisitStore(Node* node) {
CHECK_EQ(is_atomic, false);
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
ImmediateMode mode = kInt16Imm;
switch (rep) {
case MachineRepresentation::kFloat32:
@@ -359,7 +359,6 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (g.CanBeImmediate(offset, mode)) {
@@ -465,7 +464,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher m(node);
int mb = 0;
int me = 0;
- if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
+ if (m.right().HasResolvedValue() &&
+ IsContiguousMask32(m.right().ResolvedValue(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
@@ -474,7 +474,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 31)) {
left = mleft.left().node();
- sh = mleft.right().Value();
+ sh = mleft.right().ResolvedValue();
if (m.left().IsWord32Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
@@ -503,7 +503,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher m(node);
int mb = 0;
int me = 0;
- if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
+ if (m.right().HasResolvedValue() &&
+ IsContiguousMask64(m.right().ResolvedValue(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
@@ -512,7 +513,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 63)) {
left = mleft.left().node();
- sh = mleft.right().Value();
+ sh = mleft.right().ResolvedValue();
if (m.left().IsWord64Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
@@ -626,11 +627,11 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
// Try to absorb logical-and into rlwinm
Int32BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
+ int sh = m.right().ResolvedValue();
int mb;
int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
+ if (mleft.right().HasResolvedValue() &&
+ IsContiguousMask32(mleft.right().ResolvedValue() << sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
if (mb >= me) {
@@ -652,11 +653,11 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
// Try to absorb logical-and into rldic
Int64BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
+ int sh = m.right().ResolvedValue();
int mb;
int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
+ if (mleft.right().HasResolvedValue() &&
+ IsContiguousMask64(mleft.right().ResolvedValue() << sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
if (mb >= me) {
@@ -695,11 +696,12 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
// Try to absorb logical-and into rlwinm
Int32BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
+ int sh = m.right().ResolvedValue();
int mb;
int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+ if (mleft.right().HasResolvedValue() &&
+ IsContiguousMask32((uint32_t)(mleft.right().ResolvedValue()) >> sh, &mb,
+ &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
sh = (32 - sh) & 0x1F;
@@ -721,11 +723,12 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
// Try to absorb logical-and into rldic
Int64BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
+ int sh = m.right().ResolvedValue();
int mb;
int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+ if (mleft.right().HasResolvedValue() &&
+ IsContiguousMask64((uint64_t)(mleft.right().ResolvedValue()) >> sh, &mb,
+ &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
sh = (64 - sh) & 0x3F;
@@ -842,7 +845,7 @@ void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
// no register aliasing of input registers with output registers.
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
shift_operand = g.UseImmediate(m.node());
} else {
shift_operand = g.UseUniqueRegister(m.node());
@@ -898,8 +901,8 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
Node* displacement = mleft.displacement();
if (displacement != nullptr) {
Int64Matcher mdisplacement(displacement);
- DCHECK(mdisplacement.HasValue());
- offset = mdisplacement.Value();
+ DCHECK(mdisplacement.HasResolvedValue());
+ offset = mdisplacement.ResolvedValue();
}
offset = SmiWordOffset(offset);
if (g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) {
@@ -1951,7 +1954,7 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -1965,13 +1968,12 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kPPC_AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kPPC_AtomicExchangeUint8;
@@ -1983,7 +1985,6 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
opcode = kPPC_AtomicExchangeWord64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
@@ -2015,7 +2016,7 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2028,14 +2029,13 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kPPC_AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Uint8()) {
opcode = kPPC_AtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
@@ -2046,7 +2046,6 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
opcode = kPPC_AtomicCompareExchangeWord64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
@@ -2062,7 +2061,7 @@ void VisitAtomicBinaryOperation(InstructionSelector* selector, Node* node,
Node* value = node->InputAt(2);
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -2082,7 +2081,6 @@ void VisitAtomicBinaryOperation(InstructionSelector* selector, Node* node,
opcode = uint64_op;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_MRR;
@@ -2191,6 +2189,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4GeS) \
V(I32x4GtU) \
V(I32x4GeU) \
+ V(I32x4DotI16x8S) \
V(I16x8Add) \
V(I16x8AddHoriz) \
V(I16x8Sub) \
@@ -2207,10 +2206,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8GeU) \
V(I16x8SConvertI32x4) \
V(I16x8UConvertI32x4) \
- V(I16x8AddSaturateS) \
- V(I16x8SubSaturateS) \
- V(I16x8AddSaturateU) \
- V(I16x8SubSaturateU) \
+ V(I16x8AddSatS) \
+ V(I16x8SubSatS) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
V(I16x8RoundingAverageU) \
V(I8x16Add) \
V(I8x16Sub) \
@@ -2227,10 +2226,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16GeU) \
V(I8x16SConvertI16x8) \
V(I8x16UConvertI16x8) \
- V(I8x16AddSaturateS) \
- V(I8x16SubSaturateS) \
- V(I8x16AddSaturateU) \
- V(I8x16SubSaturateU) \
+ V(I8x16AddSatS) \
+ V(I8x16SubSatS) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
V(I8x16RoundingAverageU) \
V(I8x16Swizzle) \
V(S128And) \
@@ -2323,13 +2322,13 @@ SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE
-#define SIMD_VISIT_REPLACE_LANE(Type) \
- void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
- PPCOperandGenerator g(this); \
- int32_t lane = OpParameter<int32_t>(node->op()); \
- Emit(kPPC_##Type##ReplaceLane, g.DefineAsRegister(node), \
- g.UseUniqueRegister(node->InputAt(0)), g.UseImmediate(lane), \
- g.UseUniqueRegister(node->InputAt(1))); \
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ PPCOperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
+ Emit(kPPC_##Type##ReplaceLane, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane), \
+ g.UseRegister(node->InputAt(1))); \
}
SIMD_TYPES(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE
@@ -2378,6 +2377,18 @@ SIMD_SHIFT_LIST(SIMD_VISIT_SHIFT)
SIMD_BOOL_LIST(SIMD_VISIT_BOOL)
#undef SIMD_VISIT_BOOL
#undef SIMD_BOOL_LIST
+
+#define SIMD_VISIT_BITMASK(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ PPCOperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempRegister()}; \
+ Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
+ }
+SIMD_VISIT_BITMASK(I8x16BitMask)
+SIMD_VISIT_BITMASK(I16x8BitMask)
+SIMD_VISIT_BITMASK(I32x4BitMask)
+#undef SIMD_VISIT_BITMASK
#undef SIMD_TYPES
void InstructionSelector::VisitI8x16Shuffle(Node* node) {
@@ -2419,12 +2430,6 @@ void InstructionSelector::VisitS128Select(Node* node) {
void InstructionSelector::VisitS128Const(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16BitMask(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8BitMask(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4BitMask(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::EmitPrepareResults(
ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
Node* node) {
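A recurring pattern in the selector hunks above: the opcode local is left uninitialized instead of being seeded with kArchNop, and the dead "return;" after UNREACHABLE() is dropped. That is safe because UNREACHABLE() does not return, so every path that reaches the use of opcode has assigned it. A minimal sketch of the idea, using a hypothetical Unreachable() helper and simplified enums rather than V8's actual macro and types:

#include <cstdlib>

// Hypothetical stand-in for V8's UNREACHABLE(); the real macro aborts and is
// effectively noreturn, which is what lets the placeholder initializer go away.
[[noreturn]] inline void Unreachable() { std::abort(); }

enum class ArchOpcode { kExchangeUint8, kExchangeUint16, kExchangeWord32 };
enum class MachineType { Uint8, Uint16, Uint32 };

ArchOpcode SelectExchangeOpcode(MachineType type) {
  ArchOpcode opcode;  // No dummy kArchNop-style value needed.
  if (type == MachineType::Uint8) {
    opcode = ArchOpcode::kExchangeUint8;
  } else if (type == MachineType::Uint16) {
    opcode = ArchOpcode::kExchangeUint16;
  } else if (type == MachineType::Uint32) {
    opcode = ArchOpcode::kExchangeWord32;
  } else {
    Unreachable();  // Never falls through, so no trailing return is required.
  }
  return opcode;
}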
diff --git a/deps/v8/src/compiler/backend/register-allocator-verifier.h b/deps/v8/src/compiler/backend/register-allocator-verifier.h
index 6a99775e57..11bd4924f4 100644
--- a/deps/v8/src/compiler/backend/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/backend/register-allocator-verifier.h
@@ -54,14 +54,14 @@ enum AssessmentKind { Final, Pending };
class Assessment : public ZoneObject {
public:
+ Assessment(const Assessment&) = delete;
+ Assessment& operator=(const Assessment&) = delete;
+
AssessmentKind kind() const { return kind_; }
protected:
explicit Assessment(AssessmentKind kind) : kind_(kind) {}
AssessmentKind kind_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Assessment);
};
// PendingAssessments are associated with operands coming from the multiple
@@ -80,6 +80,9 @@ class PendingAssessment final : public Assessment {
operand_(operand),
aliases_(zone) {}
+ PendingAssessment(const PendingAssessment&) = delete;
+ PendingAssessment& operator=(const PendingAssessment&) = delete;
+
static const PendingAssessment* cast(const Assessment* assessment) {
CHECK(assessment->kind() == Pending);
return static_cast<const PendingAssessment*>(assessment);
@@ -99,8 +102,6 @@ class PendingAssessment final : public Assessment {
const InstructionBlock* const origin_;
InstructionOperand operand_;
ZoneSet<int> aliases_;
-
- DISALLOW_COPY_AND_ASSIGN(PendingAssessment);
};
// FinalAssessments are associated with operands that we know to be a certain
@@ -109,6 +110,8 @@ class FinalAssessment final : public Assessment {
public:
explicit FinalAssessment(int virtual_register)
: Assessment(Final), virtual_register_(virtual_register) {}
+ FinalAssessment(const FinalAssessment&) = delete;
+ FinalAssessment& operator=(const FinalAssessment&) = delete;
int virtual_register() const { return virtual_register_; }
static const FinalAssessment* cast(const Assessment* assessment) {
@@ -118,8 +121,6 @@ class FinalAssessment final : public Assessment {
private:
int virtual_register_;
-
- DISALLOW_COPY_AND_ASSIGN(FinalAssessment);
};
struct OperandAsKeyLess {
@@ -140,6 +141,9 @@ class BlockAssessments : public ZoneObject {
stale_ref_stack_slots_(zone),
spill_slot_delta_(spill_slot_delta),
zone_(zone) {}
+ BlockAssessments(const BlockAssessments&) = delete;
+ BlockAssessments& operator=(const BlockAssessments&) = delete;
+
void Drop(InstructionOperand operand) {
map_.erase(operand);
stale_ref_stack_slots_.erase(operand);
@@ -188,8 +192,6 @@ class BlockAssessments : public ZoneObject {
OperandSet stale_ref_stack_slots_;
int spill_slot_delta_;
Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(BlockAssessments);
};
class RegisterAllocatorVerifier final : public ZoneObject {
@@ -197,6 +199,9 @@ class RegisterAllocatorVerifier final : public ZoneObject {
RegisterAllocatorVerifier(Zone* zone, const RegisterConfiguration* config,
const InstructionSequence* sequence,
const Frame* frame);
+ RegisterAllocatorVerifier(const RegisterAllocatorVerifier&) = delete;
+ RegisterAllocatorVerifier& operator=(const RegisterAllocatorVerifier&) =
+ delete;
void VerifyAssignment(const char* caller_info);
void VerifyGapMoves();
@@ -290,8 +295,6 @@ class RegisterAllocatorVerifier final : public ZoneObject {
int spill_slot_delta_;
// TODO(chromium:725559): remove after we understand this bug's root cause.
const char* caller_info_ = nullptr;
-
- DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorVerifier);
};
} // namespace compiler
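The register-allocator-verifier hunks (and many of the class changes below) replace the old DISALLOW_COPY_AND_ASSIGN macro, which hid a private copy constructor and assignment operator at the end of the class, with copy operations explicitly deleted in the public section, giving a clearer "use of deleted function" diagnostic. A minimal sketch of the pattern, with a hypothetical SimpleAssessment class standing in for the real hierarchy:

// Before: a private DISALLOW_COPY_AND_ASSIGN(SimpleAssessment); at the bottom.
// After: deleted copy operations declared up front in the public section.
class SimpleAssessment {
 public:
  SimpleAssessment(const SimpleAssessment&) = delete;
  SimpleAssessment& operator=(const SimpleAssessment&) = delete;

  int kind() const { return kind_; }

 protected:
  explicit SimpleAssessment(int kind) : kind_(kind) {}
  int kind_;
};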
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 30724647c6..c0905b945f 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -1001,8 +1001,8 @@ void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
// that each new use interval either precedes, intersects with or touches
// the last added interval.
DCHECK(start <= first_interval_->end());
- first_interval_->set_start(Min(start, first_interval_->start()));
- first_interval_->set_end(Max(end, first_interval_->end()));
+ first_interval_->set_start(std::min(start, first_interval_->start()));
+ first_interval_->set_end(std::max(end, first_interval_->end()));
}
}
}
@@ -3385,7 +3385,7 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
for (auto active : active_live_ranges()) {
split_conflicting(range, active, [this](LiveRange* updated) {
next_active_ranges_change_ =
- Min(updated->End(), next_active_ranges_change_);
+ std::min(updated->End(), next_active_ranges_change_);
});
}
for (int reg = 0; reg < num_registers(); ++reg) {
@@ -3396,7 +3396,7 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
for (auto inactive : inactive_live_ranges(reg)) {
split_conflicting(range, inactive, [this](LiveRange* updated) {
next_inactive_ranges_change_ =
- Min(updated->End(), next_inactive_ranges_change_);
+ std::min(updated->End(), next_inactive_ranges_change_);
});
}
}
@@ -4129,9 +4129,9 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
LifetimePosition::GapFromInstructionIndex(0);
} else {
use_pos[aliased_reg] =
- Min(block_pos[aliased_reg],
- range->NextLifetimePositionRegisterIsBeneficial(
- current->Start()));
+ std::min(block_pos[aliased_reg],
+ range->NextLifetimePositionRegisterIsBeneficial(
+ current->Start()));
}
}
}
@@ -4157,10 +4157,10 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
if (kSimpleFPAliasing || !check_fp_aliasing()) {
if (is_fixed) {
- block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
- use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+ block_pos[cur_reg] = std::min(block_pos[cur_reg], next_intersection);
+ use_pos[cur_reg] = std::min(block_pos[cur_reg], use_pos[cur_reg]);
} else {
- use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+ use_pos[cur_reg] = std::min(use_pos[cur_reg], next_intersection);
}
} else {
int alias_base_index = -1;
@@ -4171,11 +4171,12 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
int aliased_reg = alias_base_index + aliases;
if (is_fixed) {
block_pos[aliased_reg] =
- Min(block_pos[aliased_reg], next_intersection);
+ std::min(block_pos[aliased_reg], next_intersection);
use_pos[aliased_reg] =
- Min(block_pos[aliased_reg], use_pos[aliased_reg]);
+ std::min(block_pos[aliased_reg], use_pos[aliased_reg]);
} else {
- use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
+ use_pos[aliased_reg] =
+ std::min(use_pos[aliased_reg], next_intersection);
}
}
}
@@ -4206,8 +4207,9 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
if (spill_mode == SpillMode::kSpillDeferred) {
InstructionBlock* deferred_block =
code()->GetInstructionBlock(current->Start().ToInstructionIndex());
- new_end = Min(new_end, LifetimePosition::GapFromInstructionIndex(
- LastDeferredInstructionIndex(deferred_block)));
+ new_end =
+ std::min(new_end, LifetimePosition::GapFromInstructionIndex(
+ LastDeferredInstructionIndex(deferred_block)));
}
// We couldn't spill until the next register use. Split before the register
@@ -4315,7 +4317,7 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current,
if (next_pos == nullptr) {
SpillAfter(range, split_pos, spill_mode);
} else {
- next_intersection = Min(next_intersection, next_pos->pos());
+ next_intersection = std::min(next_intersection, next_pos->pos());
SpillBetween(range, split_pos, next_intersection, spill_mode);
}
it = InactiveToHandled(it);
@@ -4407,17 +4409,18 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
// second part, as that likely is the current position of the register
// allocator and we cannot add ranges to unhandled that start before
// the current position.
- LifetimePosition split_start = Max(second_part->Start().End(), until);
+ LifetimePosition split_start = std::max(second_part->Start().End(), until);
// If end is an actual use (which it typically is) we have to split
// so that there is a gap before it, so we have space for moving the
// value into its position.
// However, if we have no choice, split right where asked.
- LifetimePosition third_part_end = Max(split_start, end.PrevStart().End());
+ LifetimePosition third_part_end =
+ std::max(split_start, end.PrevStart().End());
// Instead of splitting right after or even before the block boundary,
// split on the boundary to avoid extra moves.
if (data()->IsBlockBoundary(end.Start())) {
- third_part_end = Max(split_start, end.Start());
+ third_part_end = std::max(split_start, end.Start());
}
LiveRange* third_part =
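The register-allocator.cc changes in this file are a mechanical migration from V8's legacy Min()/Max() helpers to std::min/std::max from <algorithm>; behavior is unchanged and some call sites are only re-wrapped to fit the line length. A simplified sketch of the first hunk's interval widening, using plain ints in place of LifetimePosition:

#include <algorithm>

// Simplified model of a use interval; the real type is UseInterval with
// LifetimePosition endpoints.
struct SimpleInterval {
  int start;
  int end;
};

// Widen the interval so it also covers [start, end), mirroring the
// AddUseInterval hunk above but with the standard-library min/max.
void WidenInterval(SimpleInterval* interval, int start, int end) {
  interval->start = std::min(start, interval->start);
  interval->end = std::max(end, interval->end);
}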
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index 87c0afbcfc..858fac8a4e 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -185,6 +185,10 @@ class TopLevelLiveRange;
class TopTierRegisterAllocationData final : public RegisterAllocationData {
public:
+ TopTierRegisterAllocationData(const TopTierRegisterAllocationData&) = delete;
+ TopTierRegisterAllocationData& operator=(
+ const TopTierRegisterAllocationData&) = delete;
+
static const TopTierRegisterAllocationData* cast(
const RegisterAllocationData* data) {
DCHECK_EQ(data->type(), Type::kTopTier);
@@ -374,8 +378,6 @@ class TopTierRegisterAllocationData final : public RegisterAllocationData {
ZoneVector<ZoneVector<LiveRange*>> spill_state_;
RegisterAllocationFlags flags_;
TickCounter* const tick_counter_;
-
- DISALLOW_COPY_AND_ASSIGN(TopTierRegisterAllocationData);
};
// Representation of the non-empty interval [start,end[.
@@ -385,6 +387,8 @@ class UseInterval final : public ZoneObject {
: start_(start), end_(end), next_(nullptr) {
DCHECK(start < end);
}
+ UseInterval(const UseInterval&) = delete;
+ UseInterval& operator=(const UseInterval&) = delete;
LifetimePosition start() const { return start_; }
void set_start(LifetimePosition start) { start_ = start; }
@@ -431,8 +435,6 @@ class UseInterval final : public ZoneObject {
LifetimePosition start_;
LifetimePosition end_;
UseInterval* next_;
-
- DISALLOW_COPY_AND_ASSIGN(UseInterval);
};
enum class UsePositionType : uint8_t {
@@ -456,6 +458,8 @@ class V8_EXPORT_PRIVATE UsePosition final
public:
UsePosition(LifetimePosition pos, InstructionOperand* operand, void* hint,
UsePositionHintType hint_type);
+ UsePosition(const UsePosition&) = delete;
+ UsePosition& operator=(const UsePosition&) = delete;
InstructionOperand* operand() const { return operand_; }
bool HasOperand() const { return operand_ != nullptr; }
@@ -507,8 +511,6 @@ class V8_EXPORT_PRIVATE UsePosition final
UsePosition* next_;
LifetimePosition const pos_;
uint32_t flags_;
-
- DISALLOW_COPY_AND_ASSIGN(UsePosition);
};
class SpillRange;
@@ -520,6 +522,9 @@ class LiveRangeBundle;
// intervals over the instruction ordering.
class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
public:
+ LiveRange(const LiveRange&) = delete;
+ LiveRange& operator=(const LiveRange&) = delete;
+
UseInterval* first_interval() const { return first_interval_; }
UsePosition* first_pos() const { return first_pos_; }
TopLevelLiveRange* TopLevel() { return top_level_; }
@@ -713,8 +718,6 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
LiveRangeBundle* bundle_ = nullptr;
// Next interval start, relative to the current linear scan position.
LifetimePosition next_start_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
struct LiveRangeOrdering {
@@ -790,6 +793,9 @@ class LiveRangeBundle : public ZoneObject {
class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
public:
explicit TopLevelLiveRange(int vreg, MachineRepresentation rep);
+ TopLevelLiveRange(const TopLevelLiveRange&) = delete;
+ TopLevelLiveRange& operator=(const TopLevelLiveRange&) = delete;
+
int spill_start_index() const { return spill_start_index_; }
bool IsFixed() const { return vreg_ < 0; }
@@ -825,7 +831,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
bits_ = HasSlotUseField::update(bits_, SlotUseKind::kNoSlotUse);
}
void register_slot_use(SlotUseKind value) {
- bits_ = HasSlotUseField::update(bits_, Max(slot_use_kind(), value));
+ bits_ = HasSlotUseField::update(bits_, std::max(slot_use_kind(), value));
}
SlotUseKind slot_use_kind() const { return HasSlotUseField::decode(bits_); }
@@ -895,7 +901,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
InstructionOperand* operand);
void SetSpillOperand(InstructionOperand* operand);
void SetSpillStartIndex(int start) {
- spill_start_index_ = Min(start, spill_start_index_);
+ spill_start_index_ = std::min(start, spill_start_index_);
}
// Omits any moves from spill_move_insertion_locations_ that can be skipped.
@@ -1046,8 +1052,6 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
int spill_start_index_;
UsePosition* last_pos_;
LiveRange* last_child_covers_;
-
- DISALLOW_COPY_AND_ASSIGN(TopLevelLiveRange);
};
struct PrintableLiveRange {
@@ -1062,6 +1066,8 @@ class SpillRange final : public ZoneObject {
public:
static const int kUnassignedSlot = -1;
SpillRange(TopLevelLiveRange* range, Zone* zone);
+ SpillRange(const SpillRange&) = delete;
+ SpillRange& operator=(const SpillRange&) = delete;
UseInterval* interval() const { return use_interval_; }
@@ -1096,8 +1102,6 @@ class SpillRange final : public ZoneObject {
LifetimePosition end_position_;
int assigned_slot_;
int byte_width_;
-
- DISALLOW_COPY_AND_ASSIGN(SpillRange);
};
class LiveRangeBound {
@@ -1106,6 +1110,8 @@ class LiveRangeBound {
: range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
DCHECK(!range->IsEmpty());
}
+ LiveRangeBound(const LiveRangeBound&) = delete;
+ LiveRangeBound& operator=(const LiveRangeBound&) = delete;
bool CanCover(LifetimePosition position) {
return start_ <= position && position < end_;
@@ -1115,9 +1121,6 @@ class LiveRangeBound {
const LifetimePosition start_;
const LifetimePosition end_;
const bool skip_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
};
struct FindResult {
@@ -1128,6 +1131,9 @@ struct FindResult {
class LiveRangeBoundArray {
public:
LiveRangeBoundArray() : length_(0), start_(nullptr) {}
+ LiveRangeBoundArray(const LiveRangeBoundArray&) = delete;
+ LiveRangeBoundArray& operator=(const LiveRangeBoundArray&) = delete;
+
bool ShouldInitialize() { return start_ == nullptr; }
void Initialize(Zone* zone, TopLevelLiveRange* range);
LiveRangeBound* Find(const LifetimePosition position) const;
@@ -1140,14 +1146,15 @@ class LiveRangeBoundArray {
private:
size_t length_;
LiveRangeBound* start_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
};
class LiveRangeFinder {
public:
explicit LiveRangeFinder(const TopTierRegisterAllocationData* data,
Zone* zone);
+ LiveRangeFinder(const LiveRangeFinder&) = delete;
+ LiveRangeFinder& operator=(const LiveRangeFinder&) = delete;
+
LiveRangeBoundArray* ArrayFor(int operand_index);
private:
@@ -1155,13 +1162,13 @@ class LiveRangeFinder {
const int bounds_length_;
LiveRangeBoundArray* const bounds_;
Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
};
class ConstraintBuilder final : public ZoneObject {
public:
explicit ConstraintBuilder(TopTierRegisterAllocationData* data);
+ ConstraintBuilder(const ConstraintBuilder&) = delete;
+ ConstraintBuilder& operator=(const ConstraintBuilder&) = delete;
// Phase 1 : insert moves to account for fixed register operands.
void MeetRegisterConstraints();
@@ -1185,14 +1192,14 @@ class ConstraintBuilder final : public ZoneObject {
void ResolvePhis(const InstructionBlock* block);
TopTierRegisterAllocationData* const data_;
-
- DISALLOW_COPY_AND_ASSIGN(ConstraintBuilder);
};
class LiveRangeBuilder final : public ZoneObject {
public:
explicit LiveRangeBuilder(TopTierRegisterAllocationData* data,
Zone* local_zone);
+ LiveRangeBuilder(const LiveRangeBuilder&) = delete;
+ LiveRangeBuilder& operator=(const LiveRangeBuilder&) = delete;
// Phase 3: compute liveness of all virtual registers.
void BuildLiveRanges();
@@ -1264,8 +1271,6 @@ class LiveRangeBuilder final : public ZoneObject {
}
TopTierRegisterAllocationData* const data_;
ZoneMap<InstructionOperand*, UsePosition*> phi_hints_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRangeBuilder);
};
class BundleBuilder final : public ZoneObject {
@@ -1284,6 +1289,8 @@ class BundleBuilder final : public ZoneObject {
class RegisterAllocator : public ZoneObject {
public:
RegisterAllocator(TopTierRegisterAllocationData* data, RegisterKind kind);
+ RegisterAllocator(const RegisterAllocator&) = delete;
+ RegisterAllocator& operator=(const RegisterAllocator&) = delete;
protected:
using SpillMode = TopTierRegisterAllocationData::SpillMode;
@@ -1352,14 +1359,14 @@ class RegisterAllocator : public ZoneObject {
private:
bool no_combining_;
-
- DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
};
class LinearScanAllocator final : public RegisterAllocator {
public:
LinearScanAllocator(TopTierRegisterAllocationData* data, RegisterKind kind,
Zone* local_zone);
+ LinearScanAllocator(const LinearScanAllocator&) = delete;
+ LinearScanAllocator& operator=(const LinearScanAllocator&) = delete;
// Phase 4: compute register assignments.
void AllocateRegisters();
@@ -1506,13 +1513,13 @@ class LinearScanAllocator final : public RegisterAllocator {
#ifdef DEBUG
LifetimePosition allocation_finger_;
#endif
-
- DISALLOW_COPY_AND_ASSIGN(LinearScanAllocator);
};
class OperandAssigner final : public ZoneObject {
public:
explicit OperandAssigner(TopTierRegisterAllocationData* data);
+ OperandAssigner(const OperandAssigner&) = delete;
+ OperandAssigner& operator=(const OperandAssigner&) = delete;
// Phase 5: final decision on spilling mode.
void DecideSpillingMode();
@@ -1527,13 +1534,13 @@ class OperandAssigner final : public ZoneObject {
TopTierRegisterAllocationData* data() const { return data_; }
TopTierRegisterAllocationData* const data_;
-
- DISALLOW_COPY_AND_ASSIGN(OperandAssigner);
};
class ReferenceMapPopulator final : public ZoneObject {
public:
explicit ReferenceMapPopulator(TopTierRegisterAllocationData* data);
+ ReferenceMapPopulator(const ReferenceMapPopulator&) = delete;
+ ReferenceMapPopulator& operator=(const ReferenceMapPopulator&) = delete;
// Phase 10: compute values for pointer maps.
void PopulateReferenceMaps();
@@ -1544,8 +1551,6 @@ class ReferenceMapPopulator final : public ZoneObject {
bool SafePointsAreInOrder() const;
TopTierRegisterAllocationData* const data_;
-
- DISALLOW_COPY_AND_ASSIGN(ReferenceMapPopulator);
};
class LiveRangeBoundArray;
@@ -1559,6 +1564,8 @@ class LiveRangeBoundArray;
class LiveRangeConnector final : public ZoneObject {
public:
explicit LiveRangeConnector(TopTierRegisterAllocationData* data);
+ LiveRangeConnector(const LiveRangeConnector&) = delete;
+ LiveRangeConnector& operator=(const LiveRangeConnector&) = delete;
// Phase 8: reconnect split ranges with moves, when the control flow
// between the ranges is trivial (no branches).
@@ -1587,8 +1594,6 @@ class LiveRangeConnector final : public ZoneObject {
Zone* temp_zone);
TopTierRegisterAllocationData* const data_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRangeConnector);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index f3ab25630f..4b51bb74b7 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -1378,7 +1378,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ CallCodeObject(reg);
} else {
@@ -1424,7 +1424,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ JumpCodeObject(reg);
} else {
@@ -1459,7 +1459,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Jump(reg);
frame_access_state()->ClearSPDelta();
@@ -1588,8 +1588,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
+ __ b(exit->label());
break;
}
case kArchRet:
@@ -3422,24 +3421,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(3));
break;
}
- case kS390_I64x2MinS: {
- __ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
case kS390_I32x4MinS: {
__ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(2));
break;
}
- case kS390_I64x2MinU: {
- __ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
case kS390_I32x4MinU: {
__ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
@@ -3470,24 +3457,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
- case kS390_I64x2MaxS: {
- __ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
case kS390_I32x4MaxS: {
__ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(2));
break;
}
- case kS390_I64x2MaxU: {
- __ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
case kS390_I32x4MaxU: {
__ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
@@ -3552,14 +3527,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(0), Condition(2));
break;
}
- case kS390_I64x2Ne: {
- __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(),
- i.OutputSimd128Register(), Condition(0), Condition(0),
- Condition(3));
- break;
- }
case kS390_I32x4Ne: {
__ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
@@ -3596,25 +3563,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
break;
}
- case kS390_I64x2GtS: {
- __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- break;
- }
case kS390_I32x4GtS: {
__ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
break;
}
- case kS390_I64x2GeS: {
- __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchDoubleReg, Condition(0), Condition(0), Condition(3));
- break;
- }
case kS390_I32x4GeS: {
__ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
@@ -3624,25 +3577,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchDoubleReg, Condition(0), Condition(0), Condition(2));
break;
}
- case kS390_I64x2GtU: {
- __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- break;
- }
case kS390_I32x4GtU: {
__ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
break;
}
- case kS390_I64x2GeU: {
- __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchDoubleReg, Condition(0), Condition(0), Condition(3));
- break;
- }
case kS390_I32x4GeU: {
__ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
@@ -3867,7 +3806,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
// vector boolean unops
- case kS390_V64x2AnyTrue:
case kS390_V32x4AnyTrue:
case kS390_V16x8AnyTrue:
case kS390_V8x16AnyTrue: {
@@ -3893,10 +3831,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vtm(kScratchDoubleReg, kScratchDoubleReg, Condition(0), Condition(0), \
Condition(0)); \
__ locgr(Condition(8), dst, temp);
- case kS390_V64x2AllTrue: {
- SIMD_ALL_TRUE(3)
- break;
- }
case kS390_V32x4AllTrue: {
SIMD_ALL_TRUE(2)
break;
@@ -4070,12 +4004,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#undef VECTOR_UNPACK
case kS390_I16x8SConvertI32x4:
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Condition(0), Condition(2));
+#else
__ vpks(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
+#endif
break;
case kS390_I8x16SConvertI16x8:
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Condition(0), Condition(1));
+#else
__ vpks(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(1));
+#endif
break;
#define VECTOR_PACK_UNSIGNED(mode) \
Simd128Register tempFPReg = i.ToSimd128Register(instr->TempAt(0)); \
@@ -4084,17 +4028,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmx(tempFPReg, i.InputSimd128Register(0), kScratchDoubleReg, \
Condition(0), Condition(0), Condition(mode)); \
__ vmx(kScratchDoubleReg, i.InputSimd128Register(1), kScratchDoubleReg, \
- Condition(0), Condition(0), Condition(mode)); \
- __ vpkls(i.OutputSimd128Register(), tempFPReg, kScratchDoubleReg, \
- Condition(0), Condition(mode));
+ Condition(0), Condition(0), Condition(mode));
case kS390_I16x8UConvertI32x4: {
// treat inputs as signed, and saturate to unsigned (negative to 0)
VECTOR_PACK_UNSIGNED(2)
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg,
+ Condition(0), Condition(2));
+#else
+ __ vpkls(i.OutputSimd128Register(), tempFPReg, kScratchDoubleReg,
+ Condition(0), Condition(2));
+#endif
break;
}
case kS390_I8x16UConvertI16x8: {
// treat inputs as signed, and saturate to unsigned (negative to 0)
VECTOR_PACK_UNSIGNED(1)
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg,
+ Condition(0), Condition(1));
+#else
+ __ vpkls(i.OutputSimd128Register(), tempFPReg, kScratchDoubleReg,
+ Condition(0), Condition(1));
+#endif
break;
}
#undef VECTOR_PACK_UNSIGNED
@@ -4115,25 +4071,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(mode)); \
__ op(tempFPReg1, tempFPReg1, tempFPReg2, Condition(0), Condition(0), \
Condition(mode + 1));
- case kS390_I16x8AddSaturateS: {
+ case kS390_I16x8AddSatS: {
BINOP_EXTRACT(va, vuph, vupl, 1)
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
+#else
+ __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(2));
+#endif
break;
}
- case kS390_I16x8SubSaturateS: {
+ case kS390_I16x8SubSatS: {
BINOP_EXTRACT(vs, vuph, vupl, 1)
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
+#else
+ __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(2));
+#endif
break;
}
- case kS390_I16x8AddSaturateU: {
+ case kS390_I16x8AddSatU: {
BINOP_EXTRACT(va, vuplh, vupll, 1)
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
+#else
+ __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(2));
+#endif
break;
}
- case kS390_I16x8SubSaturateU: {
+ case kS390_I16x8SubSatU: {
BINOP_EXTRACT(vs, vuplh, vupll, 1)
// negative to 0
__ vx(tempFPReg2, tempFPReg2, tempFPReg2, Condition(0), Condition(0),
@@ -4142,29 +4113,49 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(2));
__ vmx(tempFPReg1, tempFPReg2, tempFPReg1, Condition(0), Condition(0),
Condition(2));
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
+#else
+ __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(2));
+#endif
break;
}
- case kS390_I8x16AddSaturateS: {
+ case kS390_I8x16AddSatS: {
BINOP_EXTRACT(va, vuph, vupl, 0)
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
+#else
+ __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(1));
+#endif
break;
}
- case kS390_I8x16SubSaturateS: {
+ case kS390_I8x16SubSatS: {
BINOP_EXTRACT(vs, vuph, vupl, 0)
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
+#else
+ __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(1));
+#endif
break;
}
- case kS390_I8x16AddSaturateU: {
+ case kS390_I8x16AddSatU: {
BINOP_EXTRACT(va, vuplh, vupll, 0)
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
+#else
+ __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(1));
+#endif
break;
}
- case kS390_I8x16SubSaturateU: {
+ case kS390_I8x16SubSatU: {
BINOP_EXTRACT(vs, vuplh, vupll, 0)
// negative to 0
__ vx(tempFPReg2, tempFPReg2, tempFPReg2, Condition(0), Condition(0),
@@ -4173,8 +4164,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(1));
__ vmx(tempFPReg1, tempFPReg2, tempFPReg1, Condition(0), Condition(0),
Condition(1));
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
+#else
+ __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(1));
+
+#endif
break;
}
#undef BINOP_EXTRACT
@@ -4202,20 +4199,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
+ Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
+ // Saturate the indices to 5 bits. Input indices more than 31 should
+ // return 0.
+ __ vrepi(kScratchDoubleReg, Operand(31), Condition(0));
+ __ vmnl(tempFPReg1, src1, kScratchDoubleReg, Condition(0), Condition(0),
+ Condition(0));
#ifdef V8_TARGET_BIG_ENDIAN
// input needs to be reversed
__ vlgv(r0, src0, MemOperand(r0, 0), Condition(3));
__ vlgv(r1, src0, MemOperand(r0, 1), Condition(3));
__ lrvgr(r0, r0);
__ lrvgr(r1, r1);
- __ vlvgp(kScratchDoubleReg, r1, r0);
- // clear scr0
- __ vx(src0, src0, src0, Condition(0), Condition(0), Condition(0));
- __ vperm(dst, kScratchDoubleReg, src0, src1, Condition(0), Condition(0));
+ __ vlvgp(dst, r1, r0);
+ // clear scratch
+ __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(0));
+ __ vperm(dst, dst, kScratchDoubleReg, tempFPReg1, Condition(0),
+ Condition(0));
#else
__ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
Condition(0), Condition(0), Condition(0));
- __ vperm(dst, src0, kScratchDoubleReg, src1, Condition(0), Condition(0));
+ __ vperm(dst, src0, kScratchDoubleReg, tempFPReg1, Condition(0),
+ Condition(0));
#endif
break;
}
@@ -4743,7 +4749,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
void CodeGenerator::FinishCode() {}
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
index ab7973c089..f7d3370e50 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -261,15 +261,6 @@ namespace compiler {
V(S390_I64x2ReplaceLane) \
V(S390_I64x2ExtractLane) \
V(S390_I64x2Eq) \
- V(S390_I64x2Ne) \
- V(S390_I64x2GtS) \
- V(S390_I64x2GeS) \
- V(S390_I64x2GtU) \
- V(S390_I64x2GeU) \
- V(S390_I64x2MinS) \
- V(S390_I64x2MinU) \
- V(S390_I64x2MaxS) \
- V(S390_I64x2MaxU) \
V(S390_I32x4Splat) \
V(S390_I32x4ExtractLane) \
V(S390_I32x4ReplaceLane) \
@@ -328,10 +319,10 @@ namespace compiler {
V(S390_I16x8SConvertI8x16High) \
V(S390_I16x8UConvertI8x16Low) \
V(S390_I16x8UConvertI8x16High) \
- V(S390_I16x8AddSaturateS) \
- V(S390_I16x8SubSaturateS) \
- V(S390_I16x8AddSaturateU) \
- V(S390_I16x8SubSaturateU) \
+ V(S390_I16x8AddSatS) \
+ V(S390_I16x8SubSatS) \
+ V(S390_I16x8AddSatU) \
+ V(S390_I16x8SubSatU) \
V(S390_I16x8RoundingAverageU) \
V(S390_I16x8Abs) \
V(S390_I16x8BitMask) \
@@ -358,20 +349,18 @@ namespace compiler {
V(S390_I8x16Neg) \
V(S390_I8x16SConvertI16x8) \
V(S390_I8x16UConvertI16x8) \
- V(S390_I8x16AddSaturateS) \
- V(S390_I8x16SubSaturateS) \
- V(S390_I8x16AddSaturateU) \
- V(S390_I8x16SubSaturateU) \
+ V(S390_I8x16AddSatS) \
+ V(S390_I8x16SubSatS) \
+ V(S390_I8x16AddSatU) \
+ V(S390_I8x16SubSatU) \
V(S390_I8x16RoundingAverageU) \
V(S390_I8x16Abs) \
V(S390_I8x16BitMask) \
V(S390_I8x16Shuffle) \
V(S390_I8x16Swizzle) \
- V(S390_V64x2AnyTrue) \
V(S390_V32x4AnyTrue) \
V(S390_V16x8AnyTrue) \
V(S390_V8x16AnyTrue) \
- V(S390_V64x2AllTrue) \
V(S390_V32x4AllTrue) \
V(S390_V16x8AllTrue) \
V(S390_V8x16AllTrue) \
diff --git a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index c0a854b7f1..be0b14c796 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -207,15 +207,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I64x2ReplaceLane:
case kS390_I64x2ExtractLane:
case kS390_I64x2Eq:
- case kS390_I64x2Ne:
- case kS390_I64x2GtS:
- case kS390_I64x2GeS:
- case kS390_I64x2GtU:
- case kS390_I64x2GeU:
- case kS390_I64x2MinS:
- case kS390_I64x2MinU:
- case kS390_I64x2MaxS:
- case kS390_I64x2MaxU:
case kS390_I32x4Splat:
case kS390_I32x4ExtractLane:
case kS390_I32x4ReplaceLane:
@@ -274,10 +265,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I16x8SConvertI8x16High:
case kS390_I16x8UConvertI8x16Low:
case kS390_I16x8UConvertI8x16High:
- case kS390_I16x8AddSaturateS:
- case kS390_I16x8SubSaturateS:
- case kS390_I16x8AddSaturateU:
- case kS390_I16x8SubSaturateU:
+ case kS390_I16x8AddSatS:
+ case kS390_I16x8SubSatS:
+ case kS390_I16x8AddSatU:
+ case kS390_I16x8SubSatU:
case kS390_I16x8RoundingAverageU:
case kS390_I16x8Abs:
case kS390_I16x8BitMask:
@@ -304,20 +295,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I8x16Neg:
case kS390_I8x16SConvertI16x8:
case kS390_I8x16UConvertI16x8:
- case kS390_I8x16AddSaturateS:
- case kS390_I8x16SubSaturateS:
- case kS390_I8x16AddSaturateU:
- case kS390_I8x16SubSaturateU:
+ case kS390_I8x16AddSatS:
+ case kS390_I8x16SubSatS:
+ case kS390_I8x16AddSatU:
+ case kS390_I8x16SubSatU:
case kS390_I8x16RoundingAverageU:
case kS390_I8x16Abs:
case kS390_I8x16BitMask:
case kS390_I8x16Shuffle:
case kS390_I8x16Swizzle:
- case kS390_V64x2AnyTrue:
case kS390_V32x4AnyTrue:
case kS390_V16x8AnyTrue:
case kS390_V8x16AnyTrue:
- case kS390_V64x2AllTrue:
case kS390_V32x4AllTrue:
case kS390_V16x8AllTrue:
case kS390_V8x16AllTrue:
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index ee3e996169..124193f50b 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -272,7 +272,7 @@ bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
ArchOpcode SelectLoadOpcode(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kS390_LoadFloat32;
@@ -747,7 +747,7 @@ static void VisitGeneralStore(
code |= MiscField::encode(static_cast<int>(record_write_mode));
selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
NodeMatcher m(value);
switch (rep) {
case MachineRepresentation::kFloat32:
@@ -799,7 +799,6 @@ static void VisitGeneralStore(
break;
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
InstructionOperand inputs[4];
size_t input_count = 0;
@@ -899,7 +898,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher m(node);
int mb = 0;
int me = 0;
- if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
+ if (m.right().HasResolvedValue() &&
+ IsContiguousMask64(m.right().ResolvedValue(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
@@ -907,7 +907,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 63)) {
left = mleft.left().node();
- sh = mleft.right().Value();
+ sh = mleft.right().ResolvedValue();
if (m.left().IsWord64Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
@@ -951,11 +951,11 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
// TODO(mbrandy): eliminate left sign extension if right >= 32
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
Int64BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
+ int sh = m.right().ResolvedValue();
int mb;
int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
+ if (mleft.right().HasResolvedValue() &&
+ IsContiguousMask64(mleft.right().ResolvedValue() << sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
if (mb >= me) {
@@ -992,11 +992,12 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
Int64BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
+ int sh = m.right().ResolvedValue();
int mb;
int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+ if (mleft.right().HasResolvedValue() &&
+ IsContiguousMask64((uint64_t)(mleft.right().ResolvedValue()) >> sh, &mb,
+ &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
sh = (64 - sh) & 0x3F;
@@ -1120,7 +1121,7 @@ void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
// no register aliasing of input registers with output registers.
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
shift_operand = g.UseImmediate(m.node());
} else {
shift_operand = g.UseUniqueRegister(m.node());
@@ -2302,7 +2303,7 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -2316,13 +2317,12 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kS390_Word64AtomicExchangeUint8;
@@ -2334,7 +2334,6 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
opcode = kS390_Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
@@ -2372,7 +2371,7 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2385,14 +2384,13 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Uint8()) {
opcode = kS390_Word64AtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
@@ -2403,7 +2401,6 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
opcode = kS390_Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
@@ -2447,7 +2444,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -2461,7 +2458,6 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
opcode = word32_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
}
@@ -2484,7 +2480,7 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode word32_op,
ArchOpcode word64_op) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Uint8()) {
opcode = uint8_op;
@@ -2496,7 +2492,6 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
opcode = word64_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
}
@@ -2559,15 +2554,6 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I64x2Sub) \
V(I64x2Mul) \
V(I64x2Eq) \
- V(I64x2Ne) \
- V(I64x2GtS) \
- V(I64x2GeS) \
- V(I64x2GtU) \
- V(I64x2GeU) \
- V(I64x2MinS) \
- V(I64x2MinU) \
- V(I64x2MaxS) \
- V(I64x2MaxU) \
V(I32x4Add) \
V(I32x4AddHoriz) \
V(I32x4Sub) \
@@ -2582,6 +2568,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I32x4GeS) \
V(I32x4GtU) \
V(I32x4GeU) \
+ V(I32x4DotI16x8S) \
V(I16x8Add) \
V(I16x8AddHoriz) \
V(I16x8Sub) \
@@ -2598,10 +2585,10 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I16x8GeU) \
V(I16x8SConvertI32x4) \
V(I16x8UConvertI32x4) \
- V(I16x8AddSaturateS) \
- V(I16x8SubSaturateS) \
- V(I16x8AddSaturateU) \
- V(I16x8SubSaturateU) \
+ V(I16x8AddSatS) \
+ V(I16x8SubSatS) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
V(I16x8RoundingAverageU) \
V(I8x16Add) \
V(I8x16Sub) \
@@ -2618,10 +2605,10 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I8x16GeU) \
V(I8x16SConvertI16x8) \
V(I8x16UConvertI16x8) \
- V(I8x16AddSaturateS) \
- V(I8x16SubSaturateS) \
- V(I8x16AddSaturateU) \
- V(I8x16SubSaturateU) \
+ V(I8x16AddSatS) \
+ V(I8x16SubSatS) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
V(I8x16RoundingAverageU) \
V(S128And) \
V(S128Or) \
@@ -2677,11 +2664,9 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I8x16ShrU)
#define SIMD_BOOL_LIST(V) \
- V(V64x2AnyTrue) \
V(V32x4AnyTrue) \
V(V16x8AnyTrue) \
V(V8x16AnyTrue) \
- V(V64x2AllTrue) \
V(V32x4AllTrue) \
V(V16x8AllTrue) \
V(V8x16AllTrue)
@@ -2855,9 +2840,10 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
void InstructionSelector::VisitI8x16Swizzle(Node* node) {
S390OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
Emit(kS390_I8x16Swizzle, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)));
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
void InstructionSelector::VisitS128Const(Node* node) {
diff --git a/deps/v8/src/compiler/backend/spill-placer.h b/deps/v8/src/compiler/backend/spill-placer.h
index 99181d074e..94a5358384 100644
--- a/deps/v8/src/compiler/backend/spill-placer.h
+++ b/deps/v8/src/compiler/backend/spill-placer.h
@@ -80,6 +80,9 @@ class SpillPlacer {
~SpillPlacer();
+ SpillPlacer(const SpillPlacer&) = delete;
+ SpillPlacer& operator=(const SpillPlacer&) = delete;
+
// Adds the given TopLevelLiveRange to the SpillPlacer's state. Will
// eventually commit spill moves for that range and mark the range to indicate
// whether its value is spilled at the definition or some later point, so that
@@ -158,8 +161,6 @@ class SpillPlacer {
// additional work.
RpoNumber first_block_ = RpoNumber::Invalid();
RpoNumber last_block_ = RpoNumber::Invalid();
-
- DISALLOW_COPY_AND_ASSIGN(SpillPlacer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index e0cf602b11..df1d6de835 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -487,13 +487,19 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} \
} while (false)
-#define ASSEMBLE_SSE_BINOP(asm_instr) \
- do { \
- if (instr->InputAt(1)->IsFPRegister()) { \
- __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
- } else { \
- __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \
- } \
+#define ASSEMBLE_SSE_BINOP(asm_instr) \
+ do { \
+ if (HasAddressingMode(instr)) { \
+ size_t index = 1; \
+ Operand right = i.MemoryOperand(&index); \
+ __ asm_instr(i.InputDoubleRegister(0), right); \
+ } else { \
+ if (instr->InputAt(1)->IsFPRegister()) { \
+ __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+ } else { \
+ __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \
+ } \
+ } \
} while (false)
#define ASSEMBLE_SSE_UNOP(asm_instr) \
@@ -505,16 +511,22 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} \
} while (false)
-#define ASSEMBLE_AVX_BINOP(asm_instr) \
- do { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
- if (instr->InputAt(1)->IsFPRegister()) { \
- __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
- i.InputDoubleRegister(1)); \
- } else { \
- __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
- i.InputOperand(1)); \
- } \
+#define ASSEMBLE_AVX_BINOP(asm_instr) \
+ do { \
+ CpuFeatureScope avx_scope(tasm(), AVX); \
+ if (HasAddressingMode(instr)) { \
+ size_t index = 1; \
+ Operand right = i.MemoryOperand(&index); \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), right); \
+ } else { \
+ if (instr->InputAt(1)->IsFPRegister()) { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ } else { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+ i.InputOperand(1)); \
+ } \
+ } \
} while (false)
#define ASSEMBLE_IEEE754_BINOP(name) \
@@ -553,6 +565,21 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ j(not_equal, &binop); \
} while (false)
+// Handles both SSE and AVX codegen. For SSE we use DefineSameAsFirst, so the
+// dst and first src will be the same. For AVX we don't restrict it that way, so
+// we will omit unnecessary moves.
+#define ASSEMBLE_SIMD_BINOP(opcode) \
+ do { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(tasm(), AVX); \
+ __ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1)); \
+ } else { \
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
+ __ opcode(i.OutputSimd128Register(), i.InputSimd128Register(1)); \
+ } \
+ } while (false)
+
#define ASSEMBLE_SIMD_INSTR(opcode, dst_operand, index) \
do { \
if (instr->InputAt(index)->IsSimd128Register()) { \
@@ -603,21 +630,53 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
// This macro will directly emit the opcode if the shift is an immediate - the
// shift value will be taken modulo 2^width. Otherwise, it will emit code to
// perform the modulus operation.
-#define ASSEMBLE_SIMD_SHIFT(opcode, width) \
- do { \
- XMMRegister dst = i.OutputSimd128Register(); \
- DCHECK_EQ(dst, i.InputSimd128Register(0)); \
- if (HasImmediateInput(instr, 1)) { \
- __ opcode(dst, byte{i.InputInt##width(1)}); \
- } else { \
- XMMRegister tmp = i.TempSimd128Register(0); \
- Register tmp_shift = i.TempRegister(1); \
- constexpr int mask = (1 << width) - 1; \
- __ movq(tmp_shift, i.InputRegister(1)); \
- __ andq(tmp_shift, Immediate(mask)); \
- __ Movq(tmp, tmp_shift); \
- __ opcode(dst, tmp); \
- } \
+#define ASSEMBLE_SIMD_SHIFT(opcode, width) \
+ do { \
+ XMMRegister dst = i.OutputSimd128Register(); \
+ if (HasImmediateInput(instr, 1)) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(tasm(), AVX); \
+ __ v##opcode(dst, i.InputSimd128Register(0), \
+ byte{i.InputInt##width(1)}); \
+ } else { \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ __ opcode(dst, byte{i.InputInt##width(1)}); \
+ } \
+ } else { \
+ XMMRegister tmp = i.TempSimd128Register(0); \
+ Register tmp_shift = i.TempRegister(1); \
+ constexpr int mask = (1 << width) - 1; \
+ __ movq(tmp_shift, i.InputRegister(1)); \
+ __ andq(tmp_shift, Immediate(mask)); \
+ __ Movq(tmp, tmp_shift); \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(tasm(), AVX); \
+ __ v##opcode(dst, i.InputSimd128Register(0), tmp); \
+ } else { \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ __ opcode(dst, tmp); \
+ } \
+ } \
+ } while (false)
+
+#define ASSEMBLE_PINSR(ASM_INSTR) \
+ do { \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ XMMRegister dst = i.OutputSimd128Register(); \
+ XMMRegister src = i.InputSimd128Register(0); \
+ uint8_t laneidx = i.InputUint8(1); \
+ if (HasAddressingMode(instr)) { \
+ __ ASM_INSTR(dst, src, i.MemoryOperand(2), laneidx); \
+ break; \
+ } \
+ if (instr->InputAt(2)->IsFPRegister()) { \
+ __ Movq(kScratchRegister, i.InputDoubleRegister(2)); \
+ __ ASM_INSTR(dst, src, kScratchRegister, laneidx); \
+ } else if (instr->InputAt(2)->IsRegister()) { \
+ __ ASM_INSTR(dst, src, i.InputRegister(2), laneidx); \
+ } else { \
+ __ ASM_INSTR(dst, src, i.InputOperand(2), laneidx); \
+ } \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
@@ -664,7 +723,7 @@ void AdjustStackPointerForTailCall(Instruction* instr,
int new_slot_above_sp,
bool allow_shrinkage = true) {
int stack_slot_delta;
- if (HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp)) {
// For this special tail-call mode, the callee has the same arguments and
// linkage as the caller, and arguments adapter frames must be preserved.
// Thus we simply have reset the stack pointer register to its original
@@ -710,7 +769,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
if (!pushes.empty() &&
(LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
first_unused_stack_slot)) {
- DCHECK(!HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp));
+ DCHECK(!instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp));
X64OperandConverter g(this, instr);
for (auto move : pushes) {
LocationOperand destination_location(
@@ -800,10 +859,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
__ call(reg);
@@ -828,7 +887,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
__ near_call(wasm_code, constant.rmode());
} else {
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(wasm_code, constant.rmode());
} else {
__ Call(wasm_code, constant.rmode());
@@ -836,7 +895,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
} else {
Register reg = i.InputRegister(0);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
__ call(reg);
@@ -847,7 +906,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchTailCallCodeObjectFromJSFunction:
- if (!HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp)) {
+ if (!instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp)) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
i.TempRegister(2));
@@ -860,10 +919,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
@@ -886,7 +945,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
} else {
Register reg = i.InputRegister(0);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
@@ -901,9 +960,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
@@ -1040,9 +1099,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
- unwinding_info_writer_.MarkBlockWillExit();
+ __ jmp(exit->label());
break;
}
case kArchRet:
@@ -2327,15 +2384,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kX64F64x2ReplaceLane: {
- if (instr->InputAt(2)->IsFPRegister()) {
- __ Movq(kScratchRegister, i.InputDoubleRegister(2));
- __ Pinsrq(i.OutputSimd128Register(), kScratchRegister, i.InputInt8(1));
- } else {
- __ Pinsrq(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
- }
- break;
- }
case kX64F64x2ExtractLane: {
__ Pextrq(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1));
__ Movq(i.OutputDoubleRegister(), kScratchRegister);
@@ -2346,19 +2394,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2Add: {
- ASSEMBLE_SSE_BINOP(Addpd);
+ ASSEMBLE_SIMD_BINOP(addpd);
break;
}
case kX64F64x2Sub: {
- ASSEMBLE_SSE_BINOP(Subpd);
+ ASSEMBLE_SIMD_BINOP(subpd);
break;
}
case kX64F64x2Mul: {
- ASSEMBLE_SSE_BINOP(Mulpd);
+ ASSEMBLE_SIMD_BINOP(mulpd);
break;
}
case kX64F64x2Div: {
- ASSEMBLE_SSE_BINOP(Divpd);
+ ASSEMBLE_SIMD_BINOP(divpd);
break;
}
case kX64F64x2Min: {
@@ -2401,23 +2449,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(cmpeqpd);
break;
}
case kX64F64x2Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(cmpneqpd);
break;
}
case kX64F64x2Lt: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(cmpltpd);
break;
}
case kX64F64x2Le: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(cmplepd);
break;
}
case kX64F64x2Qfma: {
@@ -2446,20 +2490,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- // TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below
case kX64F32x4Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- if (instr->InputAt(0)->IsFPRegister()) {
- __ Movss(dst, i.InputDoubleRegister(0));
- } else {
- __ Movss(dst, i.InputOperand(0));
- }
- __ Shufps(dst, dst, byte{0x0});
+ __ Shufps(i.OutputSimd128Register(), i.InputDoubleRegister(0), 0);
break;
}
case kX64F32x4ExtractLane: {
- __ Extractps(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1));
- __ Movd(i.OutputDoubleRegister(), kScratchRegister);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister src = i.InputSimd128Register(0);
+ // Extract the lane with vshufps; junk is left in the 3 high lanes.
+ __ vshufps(i.OutputDoubleRegister(), src, src, i.InputInt8(1));
+ } else {
+ __ extractps(kScratchRegister, i.InputSimd128Register(0),
+ i.InputUint8(1));
+ __ movd(i.OutputDoubleRegister(), kScratchRegister);
+ }
break;
}
case kX64F32x4ReplaceLane: {
@@ -2533,8 +2578,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F32x4Add: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Addps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(addps);
break;
}
case kX64F32x4AddHoriz: {
@@ -2543,18 +2587,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F32x4Sub: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Subps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(subps);
break;
}
case kX64F32x4Mul: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Mulps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(mulps);
break;
}
case kX64F32x4Div: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Divps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(divps);
break;
}
case kX64F32x4Min: {
@@ -2597,25 +2638,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F32x4Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmpps(i.OutputSimd128Register(), i.InputSimd128Register(1),
- int8_t{0x0});
+ ASSEMBLE_SIMD_BINOP(cmpeqps);
break;
}
case kX64F32x4Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmpps(i.OutputSimd128Register(), i.InputSimd128Register(1),
- int8_t{0x4});
+ ASSEMBLE_SIMD_BINOP(cmpneqps);
break;
}
case kX64F32x4Lt: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmpltps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(cmpltps);
break;
}
case kX64F32x4Le: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(cmpleps);
break;
}
case kX64F32x4Qfma: {
@@ -2694,15 +2729,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pextrq(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
- case kX64I64x2ReplaceLane: {
- if (HasRegisterInput(instr, 2)) {
- __ Pinsrq(i.OutputSimd128Register(), i.InputRegister(2),
- i.InputInt8(1));
- } else {
- __ Pinsrq(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
- }
- break;
- }
case kX64I64x2Neg: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
@@ -2714,9 +2740,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Psubq(dst, src);
break;
}
+ case kX64I64x2BitMask: {
+ __ Movmskpd(i.OutputRegister(), i.InputSimd128Register(0));
+ break;
+ }
case kX64I64x2Shl: {
// Take shift value modulo 2^6.
- ASSEMBLE_SIMD_SHIFT(Psllq, 6);
+ ASSEMBLE_SIMD_SHIFT(psllq, 6);
break;
}
case kX64I64x2ShrS: {
@@ -2730,22 +2760,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// lower quadword
__ Pextrq(tmp, src, int8_t{0x0});
__ sarq_cl(tmp);
- __ Pinsrq(dst, tmp, int8_t{0x0});
+ __ Pinsrq(dst, tmp, uint8_t{0x0});
// upper quadword
__ Pextrq(tmp, src, int8_t{0x1});
__ sarq_cl(tmp);
- __ Pinsrq(dst, tmp, int8_t{0x1});
+ __ Pinsrq(dst, tmp, uint8_t{0x1});
break;
}
case kX64I64x2Add: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Paddq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(paddq);
break;
}
case kX64I64x2Sub: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Psubq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(psubq);
break;
}
case kX64I64x2Mul: {
@@ -2773,177 +2801,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Paddq(left, tmp2); // left == dst
break;
}
- case kX64I64x2MinS: {
- if (CpuFeatures::IsSupported(SSE4_2)) {
- CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src0 = i.InputSimd128Register(0);
- XMMRegister src1 = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
- DCHECK_EQ(tmp, xmm0);
-
- __ movaps(tmp, src1);
- __ pcmpgtq(tmp, src0);
- __ movaps(dst, src1);
- __ blendvpd(dst, src0); // implicit use of xmm0 as mask
- } else {
- CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
- Register tmp1 = i.TempRegister(1);
- Register tmp2 = i.TempRegister(2);
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // backup src since we cannot change it
- __ movaps(tmp, src);
-
- // compare the lower quardwords
- __ movq(tmp1, dst);
- __ movq(tmp2, tmp);
- __ cmpq(tmp1, tmp2);
- // tmp2 now has the min of lower quadwords
- __ cmovq(less_equal, tmp2, tmp1);
- // tmp1 now has the higher quadword
- // must do this before movq, movq clears top quadword
- __ pextrq(tmp1, dst, 1);
- // save tmp2 into dst
- __ movq(dst, tmp2);
- // tmp2 now has the higher quadword
- __ pextrq(tmp2, tmp, 1);
- // compare higher quadwords
- __ cmpq(tmp1, tmp2);
- // tmp2 now has the min of higher quadwords
- __ cmovq(less_equal, tmp2, tmp1);
- __ movq(tmp, tmp2);
- // dst = [tmp[0], dst[0]]
- __ punpcklqdq(dst, tmp);
- }
- break;
- }
- case kX64I64x2MaxS: {
- CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- DCHECK_EQ(tmp, xmm0);
-
- __ movaps(tmp, src);
- __ pcmpgtq(tmp, dst);
- __ blendvpd(dst, src); // implicit use of xmm0 as mask
- break;
- }
case kX64I64x2Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
- break;
- }
- case kX64I64x2Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister tmp = i.TempSimd128Register(0);
- __ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
- __ Pcmpeqq(tmp, tmp);
- __ Pxor(i.OutputSimd128Register(), tmp);
- break;
- }
- case kX64I64x2GtS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Pcmpgtq(i.OutputSimd128Register(), i.InputSimd128Register(1));
- break;
- }
- case kX64I64x2GeS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
-
- __ Movaps(tmp, src);
- __ Pcmpgtq(tmp, dst);
- __ Pcmpeqd(dst, dst);
- __ Pxor(dst, tmp);
+ ASSEMBLE_SIMD_BINOP(pcmpeqq);
break;
}
case kX64I64x2ShrU: {
// Take shift value modulo 2^6.
- ASSEMBLE_SIMD_SHIFT(Psrlq, 6);
- break;
- }
- case kX64I64x2MinU: {
- CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
- CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src0 = i.InputSimd128Register(0);
- XMMRegister src1 = i.InputSimd128Register(1);
- XMMRegister tmp0 = i.TempSimd128Register(0);
- XMMRegister tmp1 = i.TempSimd128Register(1);
- DCHECK_EQ(tmp1, xmm0);
-
- __ movaps(dst, src1);
- __ movaps(tmp0, src0);
-
- __ pcmpeqd(tmp1, tmp1);
- __ psllq(tmp1, 63);
-
- __ pxor(tmp0, tmp1);
- __ pxor(tmp1, dst);
-
- __ pcmpgtq(tmp1, tmp0);
- __ blendvpd(dst, src0); // implicit use of xmm0 as mask
- break;
- }
- case kX64I64x2MaxU: {
- CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
- CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- XMMRegister dst_tmp = i.TempSimd128Register(0);
- XMMRegister tmp = i.TempSimd128Register(1);
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- DCHECK_EQ(tmp, xmm0);
-
- __ movaps(dst_tmp, dst);
-
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 63);
-
- __ pxor(dst_tmp, tmp);
- __ pxor(tmp, src);
-
- __ pcmpgtq(tmp, dst_tmp);
- __ blendvpd(dst, src); // implicit use of xmm0 as mask
- break;
- }
- case kX64I64x2GtU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
-
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psllq(kScratchDoubleReg, 63);
-
- __ Movaps(tmp, src);
- __ Pxor(tmp, kScratchDoubleReg);
- __ Pxor(dst, kScratchDoubleReg);
- __ Pcmpgtq(dst, tmp);
- break;
- }
- case kX64I64x2GeU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_2);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
-
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psllq(kScratchDoubleReg, 63);
-
- __ Movaps(tmp, src);
- __ Pxor(dst, kScratchDoubleReg);
- __ Pxor(tmp, kScratchDoubleReg);
- __ Pcmpgtq(tmp, dst);
- __ Pcmpeqd(dst, dst);
- __ Pxor(dst, tmp);
+ ASSEMBLE_SIMD_SHIFT(psrlq, 6);
break;
}
case kX64I32x4Splat: {
@@ -2960,15 +2824,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
- case kX64I32x4ReplaceLane: {
- if (HasRegisterInput(instr, 2)) {
- __ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2),
- i.InputInt8(1));
- } else {
- __ Pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
- }
- break;
- }
case kX64I32x4SConvertF32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister dst = i.OutputSimd128Register();
@@ -3012,40 +2867,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4Shl: {
// Take shift value modulo 2^5.
- ASSEMBLE_SIMD_SHIFT(Pslld, 5);
+ ASSEMBLE_SIMD_SHIFT(pslld, 5);
break;
}
case kX64I32x4ShrS: {
// Take shift value modulo 2^5.
- ASSEMBLE_SIMD_SHIFT(Psrad, 5);
+ ASSEMBLE_SIMD_SHIFT(psrad, 5);
break;
}
case kX64I32x4Add: {
- __ Paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(paddd);
break;
}
case kX64I32x4AddHoriz: {
- __ Phaddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(phaddd);
break;
}
case kX64I32x4Sub: {
- __ Psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(psubd);
break;
}
case kX64I32x4Mul: {
- __ Pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmulld);
break;
}
case kX64I32x4MinS: {
- __ Pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pminsd);
break;
}
case kX64I32x4MaxS: {
- __ Pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmaxsd);
break;
}
case kX64I32x4Eq: {
- __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pcmpeqd);
break;
}
case kX64I32x4Ne: {
@@ -3056,7 +2911,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4GtS: {
- __ Pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pcmpgtd);
break;
}
case kX64I32x4GeS: {
@@ -3076,8 +2931,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Maxps(dst, tmp2);
// scratch: float representation of max_signed
__ Pcmpeqd(tmp2, tmp2);
- __ Psrld(tmp2, uint8_t{1}); // 0x7fffffff
- __ Cvtdq2ps(tmp2, tmp2); // 0x4f000000
+ __ Psrld(tmp2, uint8_t{1}); // 0x7fffffff
+ __ Cvtdq2ps(tmp2, tmp2); // 0x4f000000
// tmp: convert (src-max_signed).
// Positive overflow lanes -> 0x7FFFFFFF
// Negative lanes -> 0
@@ -3106,15 +2961,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4ShrU: {
// Take shift value modulo 2^5.
- ASSEMBLE_SIMD_SHIFT(Psrld, 5);
+ ASSEMBLE_SIMD_SHIFT(psrld, 5);
break;
}
case kX64I32x4MinU: {
- __ Pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pminud);
break;
}
case kX64I32x4MaxU: {
- __ Pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmaxud);
break;
}
case kX64I32x4GtU: {
@@ -3143,7 +2998,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4DotI16x8S: {
- __ Pmaddwd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmaddwd);
break;
}
case kX64S128Const: {
@@ -3159,7 +3014,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64S128Zero: {
XMMRegister dst = i.OutputSimd128Register();
- __ Xorps(dst, dst);
+ __ Pxor(dst, dst);
break;
}
case kX64S128AllOnes: {
@@ -3178,26 +3033,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pshufd(dst, dst, uint8_t{0x0});
break;
}
- case kX64I16x8ExtractLaneU: {
- Register dst = i.OutputRegister();
- __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
- break;
- }
case kX64I16x8ExtractLaneS: {
Register dst = i.OutputRegister();
- __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ Pextrw(dst, i.InputSimd128Register(0), i.InputUint8(1));
__ movsxwl(dst, dst);
break;
}
- case kX64I16x8ReplaceLane: {
- if (HasRegisterInput(instr, 2)) {
- __ Pinsrw(i.OutputSimd128Register(), i.InputRegister(2),
- i.InputInt8(1));
- } else {
- __ Pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
- }
- break;
- }
case kX64I16x8SConvertI8x16Low: {
__ Pmovsxbw(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -3222,53 +3063,52 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8Shl: {
// Take shift value modulo 2^4.
- ASSEMBLE_SIMD_SHIFT(Psllw, 4);
+ ASSEMBLE_SIMD_SHIFT(psllw, 4);
break;
}
case kX64I16x8ShrS: {
// Take shift value modulo 2^4.
- ASSEMBLE_SIMD_SHIFT(Psraw, 4);
+ ASSEMBLE_SIMD_SHIFT(psraw, 4);
break;
}
case kX64I16x8SConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Packssdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(packssdw);
break;
}
case kX64I16x8Add: {
- __ Paddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(paddw);
break;
}
- case kX64I16x8AddSaturateS: {
- __ Paddsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I16x8AddSatS: {
+ ASSEMBLE_SIMD_BINOP(paddsw);
break;
}
case kX64I16x8AddHoriz: {
- __ Phaddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(phaddw);
break;
}
case kX64I16x8Sub: {
- __ Psubw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(psubw);
break;
}
- case kX64I16x8SubSaturateS: {
- __ Psubsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I16x8SubSatS: {
+ ASSEMBLE_SIMD_BINOP(psubsw);
break;
}
case kX64I16x8Mul: {
- __ Pmullw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmullw);
break;
}
case kX64I16x8MinS: {
- __ Pminsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pminsw);
break;
}
case kX64I16x8MaxS: {
- __ Pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmaxsw);
break;
}
case kX64I16x8Eq: {
- __ Pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pcmpeqw);
break;
}
case kX64I16x8Ne: {
@@ -3279,7 +3119,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8GtS: {
- __ Pcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pcmpgtw);
break;
}
case kX64I16x8GeS: {
@@ -3301,28 +3141,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8ShrU: {
// Take shift value modulo 2^4.
- ASSEMBLE_SIMD_SHIFT(Psrlw, 4);
+ ASSEMBLE_SIMD_SHIFT(psrlw, 4);
break;
}
case kX64I16x8UConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Packusdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(packusdw);
break;
}
- case kX64I16x8AddSaturateU: {
- __ Paddusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I16x8AddSatU: {
+ ASSEMBLE_SIMD_BINOP(paddusw);
break;
}
- case kX64I16x8SubSaturateU: {
- __ Psubusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I16x8SubSatU: {
+ ASSEMBLE_SIMD_BINOP(psubusw);
break;
}
case kX64I16x8MinU: {
- __ Pminuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pminuw);
break;
}
case kX64I16x8MaxU: {
- __ Pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmaxuw);
break;
}
case kX64I16x8GtU: {
@@ -3343,7 +3182,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8RoundingAverageU: {
- __ Pavgw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pavgw);
break;
}
case kX64I16x8Abs: {
@@ -3369,29 +3208,56 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pshufb(dst, kScratchDoubleReg);
break;
}
- case kX64I8x16ExtractLaneU: {
- Register dst = i.OutputRegister();
- __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ case kX64Pextrb: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ size_t index = 0;
+ if (HasAddressingMode(instr)) {
+ Operand operand = i.MemoryOperand(&index);
+ __ Pextrb(operand, i.InputSimd128Register(index),
+ i.InputUint8(index + 1));
+ } else {
+ __ Pextrb(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputUint8(1));
+ }
+ break;
+ }
+ case kX64Pextrw: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ size_t index = 0;
+ if (HasAddressingMode(instr)) {
+ Operand operand = i.MemoryOperand(&index);
+ __ Pextrw(operand, i.InputSimd128Register(index),
+ i.InputUint8(index + 1));
+ } else {
+ __ Pextrw(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputUint8(1));
+ }
break;
}
case kX64I8x16ExtractLaneS: {
Register dst = i.OutputRegister();
- __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ Pextrb(dst, i.InputSimd128Register(0), i.InputUint8(1));
__ movsxbl(dst, dst);
break;
}
- case kX64I8x16ReplaceLane: {
- if (HasRegisterInput(instr, 2)) {
- __ Pinsrb(i.OutputSimd128Register(), i.InputRegister(2),
- i.InputInt8(1));
- } else {
- __ Pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
- }
+ case kX64Pinsrb: {
+ ASSEMBLE_PINSR(Pinsrb);
+ break;
+ }
+ case kX64Pinsrw: {
+ ASSEMBLE_PINSR(Pinsrw);
+ break;
+ }
+ case kX64Pinsrd: {
+ ASSEMBLE_PINSR(Pinsrd);
+ break;
+ }
+ case kX64Pinsrq: {
+ ASSEMBLE_PINSR(Pinsrq);
break;
}
case kX64I8x16SConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Packsswb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(packsswb);
break;
}
case kX64I8x16Neg: {
@@ -3472,19 +3338,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16Add: {
- __ Paddb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(paddb);
break;
}
- case kX64I8x16AddSaturateS: {
- __ Paddsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I8x16AddSatS: {
+ ASSEMBLE_SIMD_BINOP(paddsb);
break;
}
case kX64I8x16Sub: {
- __ Psubb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(psubb);
break;
}
- case kX64I8x16SubSaturateS: {
- __ Psubsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I8x16SubSatS: {
+ ASSEMBLE_SIMD_BINOP(psubsb);
break;
}
case kX64I8x16Mul: {
@@ -3521,15 +3387,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16MinS: {
- __ Pminsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pminsb);
break;
}
case kX64I8x16MaxS: {
- __ Pmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmaxsb);
break;
}
case kX64I8x16Eq: {
- __ Pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pcmpeqb);
break;
}
case kX64I8x16Ne: {
@@ -3540,7 +3406,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16GtS: {
- __ Pcmpgtb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pcmpgtb);
break;
}
case kX64I8x16GeS: {
@@ -3551,8 +3417,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16UConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Packuswb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(packuswb);
break;
}
case kX64I8x16ShrU: {
@@ -3588,20 +3453,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kX64I8x16AddSaturateU: {
- __ Paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I8x16AddSatU: {
+ ASSEMBLE_SIMD_BINOP(paddusb);
break;
}
- case kX64I8x16SubSaturateU: {
- __ Psubusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I8x16SubSatU: {
+ ASSEMBLE_SIMD_BINOP(psubusb);
break;
}
case kX64I8x16MinU: {
- __ Pminub(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pminub);
break;
}
case kX64I8x16MaxU: {
- __ Pmaxub(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmaxub);
break;
}
case kX64I8x16GtU: {
@@ -3622,7 +3487,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16RoundingAverageU: {
- __ Pavgb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pavgb);
break;
}
case kX64I8x16Abs: {
@@ -3633,16 +3498,50 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pmovmskb(i.OutputRegister(), i.InputSimd128Register(0));
break;
}
+ case kX64I8x16SignSelect: {
+ __ Pblendvb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2));
+ break;
+ }
+ case kX64I16x8SignSelect: {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpsraw(kScratchDoubleReg, i.InputSimd128Register(2), 15);
+ __ vpblendvb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ } else {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister mask = i.InputSimd128Register(2);
+ DCHECK_EQ(xmm0, mask);
+ __ movapd(kScratchDoubleReg, mask);
+ __ pxor(mask, mask);
+ __ pcmpgtw(mask, kScratchDoubleReg);
+ __ pblendvb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ // Restore mask.
+ __ movapd(mask, kScratchDoubleReg);
+ }
+ break;
+ }
+ case kX64I32x4SignSelect: {
+ __ Blendvps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2));
+ break;
+ }
+ case kX64I64x2SignSelect: {
+ __ Blendvpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2));
+ break;
+ }
case kX64S128And: {
- __ Pand(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pand);
break;
}
case kX64S128Or: {
- __ Por(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(por);
break;
}
case kX64S128Xor: {
- __ Pxor(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pxor);
break;
}
case kX64S128Not: {
@@ -3734,76 +3633,93 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kX64S8x16LoadSplat: {
+ case kX64S128Load8Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- __ Pinsrb(i.OutputSimd128Register(), i.MemoryOperand(), 0);
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pinsrb(dst, dst, i.MemoryOperand(), 0);
__ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ Pshufb(dst, kScratchDoubleReg);
break;
}
- case kX64S16x8LoadSplat: {
+ case kX64S128Load16Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- __ Pinsrw(i.OutputSimd128Register(), i.MemoryOperand(), 0);
- __ Pshuflw(i.OutputSimd128Register(), i.OutputSimd128Register(),
- uint8_t{0});
- __ Punpcklqdq(i.OutputSimd128Register(), i.OutputSimd128Register());
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pinsrw(dst, dst, i.MemoryOperand(), 0);
+ __ Pshuflw(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
break;
}
- case kX64S32x4LoadSplat: {
+ case kX64S128Load32Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vbroadcastss(i.OutputSimd128Register(), i.MemoryOperand());
} else {
- __ Movss(i.OutputSimd128Register(), i.MemoryOperand());
- __ Shufps(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ __ movss(i.OutputSimd128Register(), i.MemoryOperand());
+ __ shufps(i.OutputSimd128Register(), i.OutputSimd128Register(),
byte{0});
}
break;
}
- case kX64S64x2LoadSplat: {
+ case kX64S128Load64Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Movddup(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kX64I16x8Load8x8S: {
+ case kX64S128Load8x8S: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovsxbw(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kX64I16x8Load8x8U: {
+ case kX64S128Load8x8U: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovzxbw(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kX64I32x4Load16x4S: {
+ case kX64S128Load16x4S: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovsxwd(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kX64I32x4Load16x4U: {
+ case kX64S128Load16x4U: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovzxwd(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kX64I64x2Load32x2S: {
+ case kX64S128Load32x2S: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovsxdq(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kX64I64x2Load32x2U: {
+ case kX64S128Load32x2U: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovzxdq(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kX64S128LoadMem32Zero: {
+ case kX64S128Store32Lane: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- __ Movd(i.OutputSimd128Register(), i.MemoryOperand());
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ uint8_t lane = i.InputUint8(index + 1);
+ if (lane == 0) {
+ __ Movss(operand, i.InputSimd128Register(index));
+ } else {
+ DCHECK_GE(3, lane);
+ __ Extractps(operand, i.InputSimd128Register(index), lane);
+ }
break;
}
- case kX64S128LoadMem64Zero: {
+ case kX64S128Store64Lane: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- __ Movq(i.OutputSimd128Register(), i.MemoryOperand());
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ uint8_t lane = i.InputUint8(index + 1);
+ if (lane == 0) {
+ __ Movlps(operand, i.InputSimd128Register(index));
+ } else {
+ DCHECK_EQ(1, lane);
+ __ Movhps(operand, i.InputSimd128Register(index));
+ }
break;
}
case kX64S32x4Swizzle: {
@@ -4005,7 +3921,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Por(dst, kScratchDoubleReg);
break;
}
- case kX64V64x2AnyTrue:
case kX64V32x4AnyTrue:
case kX64V16x8AnyTrue:
case kX64V8x16AnyTrue: {
@@ -4021,10 +3936,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
// 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
// respectively.
- case kX64V64x2AllTrue: {
- ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqq);
- break;
- }
case kX64V32x4AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
break;
@@ -4592,18 +4503,25 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
unwinding_info_writer_.MarkBlockWillExit();
- // We might need rcx and rdx for scratch.
+ // We might need rcx and r10 for scratch.
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & rcx.bit());
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & rdx.bit());
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & r10.bit());
+ X64OperandConverter g(this, nullptr);
int parameter_count =
static_cast<int>(call_descriptor->StackParameterCount());
- X64OperandConverter g(this, nullptr);
- Register pop_reg = additional_pop_count->IsImmediate()
- ? rcx
- : g.ToRegister(additional_pop_count);
- Register scratch_reg = pop_reg == rcx ? rdx : rcx;
- Register argc_reg =
- additional_pop_count->IsImmediate() ? pop_reg : scratch_reg;
+
+ // {additional_pop_count} is only greater than zero if {parameter_count} == 0.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_count != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (__ emit_debug_code()) {
+ __ cmpq(g.ToRegister(additional_pop_count), Immediate(0));
+ __ Assert(equal, AbortReason::kUnexpectedAdditionalPopValue);
+ }
+ }
+
+ Register argc_reg = rcx;
#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
@@ -4636,41 +4554,33 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
if (drop_jsargs) {
- // In addition to the slots given by {additional_pop_count}, we must pop all
- // arguments from the stack (including the receiver). This number of
- // arguments is given by max(1 + argc_reg, parameter_count).
- Label argc_reg_has_final_count;
- // Exclude the receiver to simplify the computation. We'll account for it at
- // the end.
- int parameter_count_withouth_receiver = parameter_count - 1;
- if (parameter_count_withouth_receiver != 0) {
- __ cmpq(argc_reg, Immediate(parameter_count_withouth_receiver));
- __ j(greater_equal, &argc_reg_has_final_count, Label::kNear);
- __ movq(argc_reg, Immediate(parameter_count_withouth_receiver));
- __ bind(&argc_reg_has_final_count);
- }
- // Add additional pop count.
- if (additional_pop_count->IsImmediate()) {
- DCHECK_EQ(pop_reg, argc_reg);
- int additional_count = g.ToConstant(additional_pop_count).ToInt32();
- if (additional_count != 0) {
- __ addq(pop_reg, Immediate(additional_count));
- }
- } else {
- __ addq(pop_reg, argc_reg);
- }
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ int parameter_count_without_receiver =
+ parameter_count - 1; // Exclude the receiver to simplify the
+ // computation. We'll account for it at the end.
+ Label mismatch_return;
+ Register scratch_reg = r10;
+ DCHECK_NE(argc_reg, scratch_reg);
+ __ cmpq(argc_reg, Immediate(parameter_count_without_receiver));
+ __ j(greater, &mismatch_return, Label::kNear);
+ __ Ret(parameter_count * kSystemPointerSize, scratch_reg);
+ __ bind(&mismatch_return);
__ PopReturnAddressTo(scratch_reg);
- __ leaq(rsp, Operand(rsp, pop_reg, times_system_pointer_size,
+ __ leaq(rsp, Operand(rsp, argc_reg, times_system_pointer_size,
kSystemPointerSize)); // Also pop the receiver.
// We use a return instead of a jump for better return address prediction.
__ PushReturnAddressFrom(scratch_reg);
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
+ Register scratch_reg = r10;
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
size_t pop_size = (parameter_count + additional_count) * kSystemPointerSize;
CHECK_LE(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
__ Ret(static_cast<int>(pop_size), scratch_reg);
} else {
+ Register pop_reg = g.ToRegister(additional_pop_count);
+ Register scratch_reg = pop_reg == r10 ? rcx : r10;
int pop_size = static_cast<int>(parameter_count * kSystemPointerSize);
__ PopReturnAddressTo(scratch_reg);
__ leaq(rsp, Operand(rsp, pop_reg, times_system_pointer_size,
@@ -4682,7 +4592,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
void CodeGenerator::FinishCode() { tasm()->PatchConstPool(); }
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
void CodeGenerator::IncrementStackAccessCounter(
InstructionOperand* source, InstructionOperand* destination) {
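
The code-generator hunks above replace open-coded sequences such as "DCHECK_EQ(dst, src0); __ Addps(dst, src1);" with ASSEMBLE_SIMD_BINOP(addps) and ASSEMBLE_SIMD_SHIFT(psllq, ...), which take the bare SSE mnemonic. The macro bodies fall outside the hunks shown here, so the following is only a minimal sketch of the AVX/SSE dispatch such a helper presumably performs; printf stands in for the assembler, and every name in it is illustrative rather than a V8 API.

#include <cstdio>

struct XmmReg { const char* name; };

// Hypothetical model of an ASSEMBLE_SIMD_BINOP-style helper: with AVX the
// three-operand "v" form lets dst differ from src0; without AVX the SSE
// encoding is destructive, so the register allocator must already have
// placed src0 in dst.
void EmitSimdBinop(bool have_avx, const char* sse_mnemonic, XmmReg dst,
                   XmmReg src0, XmmReg src1) {
  if (have_avx) {
    std::printf("v%s %s, %s, %s\n", sse_mnemonic, dst.name, src0.name,
                src1.name);  // e.g. vaddps xmm2, xmm0, xmm1
  } else {
    std::printf("%s %s, %s\n", sse_mnemonic, dst.name,
                src1.name);  // e.g. addps xmm0, xmm1 (dst aliases src0)
  }
}

int main() {
  EmitSimdBinop(true, "addps", {"xmm2"}, {"xmm0"}, {"xmm1"});
  EmitSimdBinop(false, "addps", {"xmm0"}, {"xmm0"}, {"xmm1"});
  return 0;
}

Under that assumption, the disappearance of the DCHECK_EQ(output, input 0) checks in the rewritten cases is consistent: only the SSE fallback still needs the destination to alias the first source, and that constraint now lives inside the macro rather than at every use site.
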
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 7312121a0a..f1958e8141 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -156,7 +156,6 @@ namespace compiler {
V(X64Peek) \
V(X64F64x2Splat) \
V(X64F64x2ExtractLane) \
- V(X64F64x2ReplaceLane) \
V(X64F64x2Abs) \
V(X64F64x2Neg) \
V(X64F64x2Sqrt) \
@@ -203,27 +202,18 @@ namespace compiler {
V(X64F32x4Round) \
V(X64I64x2Splat) \
V(X64I64x2ExtractLane) \
- V(X64I64x2ReplaceLane) \
V(X64I64x2Neg) \
+ V(X64I64x2BitMask) \
V(X64I64x2Shl) \
V(X64I64x2ShrS) \
V(X64I64x2Add) \
V(X64I64x2Sub) \
V(X64I64x2Mul) \
- V(X64I64x2MinS) \
- V(X64I64x2MaxS) \
V(X64I64x2Eq) \
- V(X64I64x2Ne) \
- V(X64I64x2GtS) \
- V(X64I64x2GeS) \
V(X64I64x2ShrU) \
- V(X64I64x2MinU) \
- V(X64I64x2MaxU) \
- V(X64I64x2GtU) \
- V(X64I64x2GeU) \
+ V(X64I64x2SignSelect) \
V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \
- V(X64I32x4ReplaceLane) \
V(X64I32x4SConvertF32x4) \
V(X64I32x4SConvertI16x8Low) \
V(X64I32x4SConvertI16x8High) \
@@ -251,10 +241,9 @@ namespace compiler {
V(X64I32x4Abs) \
V(X64I32x4BitMask) \
V(X64I32x4DotI16x8S) \
+ V(X64I32x4SignSelect) \
V(X64I16x8Splat) \
- V(X64I16x8ExtractLaneU) \
V(X64I16x8ExtractLaneS) \
- V(X64I16x8ReplaceLane) \
V(X64I16x8SConvertI8x16Low) \
V(X64I16x8SConvertI8x16High) \
V(X64I16x8Neg) \
@@ -262,10 +251,10 @@ namespace compiler {
V(X64I16x8ShrS) \
V(X64I16x8SConvertI32x4) \
V(X64I16x8Add) \
- V(X64I16x8AddSaturateS) \
+ V(X64I16x8AddSatS) \
V(X64I16x8AddHoriz) \
V(X64I16x8Sub) \
- V(X64I16x8SubSaturateS) \
+ V(X64I16x8SubSatS) \
V(X64I16x8Mul) \
V(X64I16x8MinS) \
V(X64I16x8MaxS) \
@@ -277,8 +266,8 @@ namespace compiler {
V(X64I16x8UConvertI8x16High) \
V(X64I16x8ShrU) \
V(X64I16x8UConvertI32x4) \
- V(X64I16x8AddSaturateU) \
- V(X64I16x8SubSaturateU) \
+ V(X64I16x8AddSatU) \
+ V(X64I16x8SubSatU) \
V(X64I16x8MinU) \
V(X64I16x8MaxU) \
V(X64I16x8GtU) \
@@ -286,18 +275,23 @@ namespace compiler {
V(X64I16x8RoundingAverageU) \
V(X64I16x8Abs) \
V(X64I16x8BitMask) \
+ V(X64I16x8SignSelect) \
V(X64I8x16Splat) \
- V(X64I8x16ExtractLaneU) \
V(X64I8x16ExtractLaneS) \
- V(X64I8x16ReplaceLane) \
+ V(X64Pinsrb) \
+ V(X64Pinsrw) \
+ V(X64Pinsrd) \
+ V(X64Pinsrq) \
+ V(X64Pextrb) \
+ V(X64Pextrw) \
V(X64I8x16SConvertI16x8) \
V(X64I8x16Neg) \
V(X64I8x16Shl) \
V(X64I8x16ShrS) \
V(X64I8x16Add) \
- V(X64I8x16AddSaturateS) \
+ V(X64I8x16AddSatS) \
V(X64I8x16Sub) \
- V(X64I8x16SubSaturateS) \
+ V(X64I8x16SubSatS) \
V(X64I8x16Mul) \
V(X64I8x16MinS) \
V(X64I8x16MaxS) \
@@ -306,8 +300,8 @@ namespace compiler {
V(X64I8x16GtS) \
V(X64I8x16GeS) \
V(X64I8x16UConvertI16x8) \
- V(X64I8x16AddSaturateU) \
- V(X64I8x16SubSaturateU) \
+ V(X64I8x16AddSatU) \
+ V(X64I8x16SubSatU) \
V(X64I8x16ShrU) \
V(X64I8x16MinU) \
V(X64I8x16MaxU) \
@@ -316,6 +310,7 @@ namespace compiler {
V(X64I8x16RoundingAverageU) \
V(X64I8x16Abs) \
V(X64I8x16BitMask) \
+ V(X64I8x16SignSelect) \
V(X64S128Const) \
V(X64S128Zero) \
V(X64S128AllOnes) \
@@ -327,18 +322,18 @@ namespace compiler {
V(X64S128AndNot) \
V(X64I8x16Swizzle) \
V(X64I8x16Shuffle) \
- V(X64S8x16LoadSplat) \
- V(X64S16x8LoadSplat) \
- V(X64S32x4LoadSplat) \
- V(X64S64x2LoadSplat) \
- V(X64I16x8Load8x8S) \
- V(X64I16x8Load8x8U) \
- V(X64I32x4Load16x4S) \
- V(X64I32x4Load16x4U) \
- V(X64I64x2Load32x2S) \
- V(X64I64x2Load32x2U) \
- V(X64S128LoadMem32Zero) \
- V(X64S128LoadMem64Zero) \
+ V(X64S128Load8Splat) \
+ V(X64S128Load16Splat) \
+ V(X64S128Load32Splat) \
+ V(X64S128Load64Splat) \
+ V(X64S128Load8x8S) \
+ V(X64S128Load8x8U) \
+ V(X64S128Load16x4S) \
+ V(X64S128Load16x4U) \
+ V(X64S128Load32x2S) \
+ V(X64S128Load32x2U) \
+ V(X64S128Store32Lane) \
+ V(X64S128Store64Lane) \
V(X64S32x4Swizzle) \
V(X64S32x4Shuffle) \
V(X64S16x8Blend) \
@@ -364,8 +359,6 @@ namespace compiler {
V(X64S8x8Reverse) \
V(X64S8x4Reverse) \
V(X64S8x2Reverse) \
- V(X64V64x2AnyTrue) \
- V(X64V64x2AllTrue) \
V(X64V32x4AnyTrue) \
V(X64V32x4AllTrue) \
V(X64V16x8AnyTrue) \
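
instruction-codes-x64.h keeps the opcode set as a "V(...)" X-macro list: each opcode name appears exactly once in the list, and the list is expanded elsewhere into enum constants, debug strings, and switch cases. The snippet below is a small, self-contained illustration of that pattern; the DEMO_* names are invented for the example and are not V8's actual macros.

#include <cstdio>

#define DEMO_OPCODE_LIST(V) \
  V(X64Pinsrb)              \
  V(X64Pinsrw)              \
  V(X64S128Load8Splat)

// First expansion: enum constants kX64Pinsrb, kX64Pinsrw, ...
enum DemoArchOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  DEMO_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

// Second expansion: a name table that cannot drift out of sync with the enum.
const char* DemoOpcodeName(DemoArchOpcode op) {
  switch (op) {
#define OPCODE_NAME_CASE(Name) \
  case k##Name:                \
    return #Name;
    DEMO_OPCODE_LIST(OPCODE_NAME_CASE)
#undef OPCODE_NAME_CASE
  }
  return "unknown";
}

int main() {
  std::printf("%s\n", DemoOpcodeName(kX64S128Load8Splat));
  return 0;
}

With this layout, a rename such as kX64I16x8AddSaturateS to kX64I16x8AddSatS only has to touch the list entry and the switches that spell out the name, which matches the shape of the edits in this patch.
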
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 169753b40e..2af0877e53 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -126,9 +126,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Lea:
case kX64Dec32:
case kX64Inc32:
+ case kX64Pinsrb:
+ case kX64Pinsrw:
+ case kX64Pinsrd:
+ case kX64Pinsrq:
case kX64F64x2Splat:
case kX64F64x2ExtractLane:
- case kX64F64x2ReplaceLane:
case kX64F64x2Abs:
case kX64F64x2Neg:
case kX64F64x2Sqrt:
@@ -175,27 +178,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4Round:
case kX64I64x2Splat:
case kX64I64x2ExtractLane:
- case kX64I64x2ReplaceLane:
case kX64I64x2Neg:
+ case kX64I64x2BitMask:
case kX64I64x2Shl:
case kX64I64x2ShrS:
case kX64I64x2Add:
case kX64I64x2Sub:
case kX64I64x2Mul:
- case kX64I64x2MinS:
- case kX64I64x2MaxS:
case kX64I64x2Eq:
- case kX64I64x2Ne:
- case kX64I64x2GtS:
- case kX64I64x2GeS:
case kX64I64x2ShrU:
- case kX64I64x2MinU:
- case kX64I64x2MaxU:
- case kX64I64x2GtU:
- case kX64I64x2GeU:
+ case kX64I64x2SignSelect:
case kX64I32x4Splat:
case kX64I32x4ExtractLane:
- case kX64I32x4ReplaceLane:
case kX64I32x4SConvertF32x4:
case kX64I32x4SConvertI16x8Low:
case kX64I32x4SConvertI16x8High:
@@ -223,10 +217,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4Abs:
case kX64I32x4BitMask:
case kX64I32x4DotI16x8S:
+ case kX64I32x4SignSelect:
case kX64I16x8Splat:
- case kX64I16x8ExtractLaneU:
case kX64I16x8ExtractLaneS:
- case kX64I16x8ReplaceLane:
case kX64I16x8SConvertI8x16Low:
case kX64I16x8SConvertI8x16High:
case kX64I16x8Neg:
@@ -234,10 +227,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8ShrS:
case kX64I16x8SConvertI32x4:
case kX64I16x8Add:
- case kX64I16x8AddSaturateS:
+ case kX64I16x8AddSatS:
case kX64I16x8AddHoriz:
case kX64I16x8Sub:
- case kX64I16x8SubSaturateS:
+ case kX64I16x8SubSatS:
case kX64I16x8Mul:
case kX64I16x8MinS:
case kX64I16x8MaxS:
@@ -249,8 +242,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8UConvertI8x16High:
case kX64I16x8UConvertI32x4:
case kX64I16x8ShrU:
- case kX64I16x8AddSaturateU:
- case kX64I16x8SubSaturateU:
+ case kX64I16x8AddSatU:
+ case kX64I16x8SubSatU:
case kX64I16x8MinU:
case kX64I16x8MaxU:
case kX64I16x8GtU:
@@ -258,18 +251,17 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8RoundingAverageU:
case kX64I16x8Abs:
case kX64I16x8BitMask:
+ case kX64I16x8SignSelect:
case kX64I8x16Splat:
- case kX64I8x16ExtractLaneU:
case kX64I8x16ExtractLaneS:
- case kX64I8x16ReplaceLane:
case kX64I8x16SConvertI16x8:
case kX64I8x16Neg:
case kX64I8x16Shl:
case kX64I8x16ShrS:
case kX64I8x16Add:
- case kX64I8x16AddSaturateS:
+ case kX64I8x16AddSatS:
case kX64I8x16Sub:
- case kX64I8x16SubSaturateS:
+ case kX64I8x16SubSatS:
case kX64I8x16Mul:
case kX64I8x16MinS:
case kX64I8x16MaxS:
@@ -278,8 +270,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I8x16GtS:
case kX64I8x16GeS:
case kX64I8x16UConvertI16x8:
- case kX64I8x16AddSaturateU:
- case kX64I8x16SubSaturateU:
+ case kX64I8x16AddSatU:
+ case kX64I8x16SubSatU:
case kX64I8x16ShrU:
case kX64I8x16MinU:
case kX64I8x16MaxU:
@@ -288,6 +280,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I8x16RoundingAverageU:
case kX64I8x16Abs:
case kX64I8x16BitMask:
+ case kX64I8x16SignSelect:
case kX64S128And:
case kX64S128Or:
case kX64S128Xor:
@@ -297,8 +290,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S128Zero:
case kX64S128AllOnes:
case kX64S128AndNot:
- case kX64V64x2AnyTrue:
- case kX64V64x2AllTrue:
case kX64V32x4AnyTrue:
case kX64V32x4AllTrue:
case kX64V16x8AnyTrue:
@@ -359,8 +350,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Movb:
case kX64Movw:
+ case kX64S128Store32Lane:
+ case kX64S128Store64Lane:
return kHasSideEffect;
+ case kX64Pextrb:
+ case kX64Pextrw:
case kX64Movl:
if (instr->HasOutput()) {
DCHECK_LE(1, instr->InputCount());
@@ -378,18 +373,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Movsd:
case kX64Movss:
case kX64Movdqu:
- case kX64S8x16LoadSplat:
- case kX64S16x8LoadSplat:
- case kX64S32x4LoadSplat:
- case kX64S64x2LoadSplat:
- case kX64I16x8Load8x8S:
- case kX64I16x8Load8x8U:
- case kX64I32x4Load16x4S:
- case kX64I32x4Load16x4U:
- case kX64I64x2Load32x2S:
- case kX64I64x2Load32x2U:
- case kX64S128LoadMem32Zero:
- case kX64S128LoadMem64Zero:
+ case kX64S128Load8Splat:
+ case kX64S128Load16Splat:
+ case kX64S128Load32Splat:
+ case kX64S128Load64Splat:
+ case kX64S128Load8x8S:
+ case kX64S128Load8x8U:
+ case kX64S128Load16x4S:
+ case kX64S128Load16x4U:
+ case kX64S128Load32x2S:
+ case kX64S128Load32x2U:
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kX64Peek:
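
The scheduler hunks above sort the new opcodes into reordering classes: kX64S128Store32Lane and kX64S128Store64Lane return kHasSideEffect unconditionally, kX64Pextrb and kX64Pextrw are grouped with kX64Movl (whose flags depend on whether the instruction has an output), and the S128 load-splat/extend opcodes return kIsLoadOperation when they produce an output and kHasSideEffect otherwise. The sketch below is a toy model of that kind of classification, using hypothetical names rather than V8's flag values.

#include <cstdio>

enum DemoSchedFlags { kDemoNoFlags = 0, kDemoIsLoad = 1, kDemoHasSideEffect = 2 };

struct DemoInstr {
  bool may_write_memory;  // store-lane, or an extract with a memory operand
  bool has_output;        // result lands in a register
};

// Memory writers must stay ordered; memory readers that produce an output are
// loads; everything else in this toy model is treated as side-effecting.
DemoSchedFlags Classify(const DemoInstr& instr) {
  if (instr.may_write_memory) return kDemoHasSideEffect;
  return instr.has_output ? kDemoIsLoad : kDemoHasSideEffect;
}

int main() {
  std::printf("store-lane: %d\n", Classify({true, false}));   // 2, side effect
  std::printf("load-splat: %d\n", Classify({false, true}));   // 1, load
  return 0;
}

The distinction matters because the scheduler is generally free to reorder loads with respect to each other but not across side-effecting instructions.
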
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index db212677ea..7a8a2b4aa6 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -5,8 +5,11 @@
#include <algorithm>
#include "src/base/iterator.h"
+#include "src/base/logging.h"
#include "src/base/overflowing-math.h"
+#include "src/codegen/machine-type.h"
#include "src/compiler/backend/instruction-selector-impl.h"
+#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/roots/roots-inl.h"
@@ -90,6 +93,16 @@ class X64OperandGenerator final : public OperandGenerator {
return rep == MachineRepresentation::kWord32 ||
(COMPRESS_POINTERS_BOOL &&
(IsAnyTagged(rep) || IsAnyCompressed(rep)));
+ case kAVXFloat64Add:
+ case kAVXFloat64Sub:
+ case kAVXFloat64Mul:
+ DCHECK_EQ(MachineRepresentation::kFloat64, rep);
+ return true;
+ case kAVXFloat32Add:
+ case kAVXFloat32Sub:
+ case kAVXFloat32Mul:
+ DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ return true;
case kX64Cmp16:
case kX64Test16:
return rep == MachineRepresentation::kWord16;
@@ -178,12 +191,13 @@ class X64OperandGenerator final : public OperandGenerator {
size_t* input_count) {
{
LoadMatcher<ExternalReferenceMatcher> m(operand);
- if (m.index().HasValue() && m.object().HasValue() &&
- selector()->CanAddressRelativeToRootsRegister(m.object().Value())) {
+ if (m.index().HasResolvedValue() && m.object().HasResolvedValue() &&
+ selector()->CanAddressRelativeToRootsRegister(
+ m.object().ResolvedValue())) {
ptrdiff_t const delta =
- m.index().Value() +
+ m.index().ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
- selector()->isolate(), m.object().Value());
+ selector()->isolate(), m.object().ResolvedValue());
if (is_int32(delta)) {
inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
return kMode_Root;
@@ -229,7 +243,7 @@ class X64OperandGenerator final : public OperandGenerator {
namespace {
ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kX64Movss;
@@ -332,53 +346,93 @@ void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
}
+void InstructionSelector::VisitLoadLane(Node* node) {
+ LoadLaneParameters params = LoadLaneParametersOf(node->op());
+ InstructionCode opcode = kArchNop;
+ if (params.rep == MachineType::Int8()) {
+ opcode = kX64Pinsrb;
+ } else if (params.rep == MachineType::Int16()) {
+ opcode = kX64Pinsrw;
+ } else if (params.rep == MachineType::Int32()) {
+ opcode = kX64Pinsrd;
+ } else if (params.rep == MachineType::Int64()) {
+ opcode = kX64Pinsrq;
+ } else {
+ UNREACHABLE();
+ }
+
+ X64OperandGenerator g(this);
+ InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+ // Input 0 is value node, 1 is lane idx, and GetEffectiveAddressMemoryOperand
+ // uses up to 3 inputs. This ordering is consistent with other operations that
+ // use the same opcode.
+ InstructionOperand inputs[5];
+ size_t input_count = 0;
+
+ inputs[input_count++] = g.UseRegister(node->InputAt(2));
+ inputs[input_count++] = g.UseImmediate(params.laneidx);
+
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+
+ DCHECK_GE(5, input_count);
+
+ // x64 supports unaligned loads.
+ DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
+ if (params.kind == MemoryAccessKind::kProtected) {
+ opcode |= MiscField::encode(kMemoryAccessProtected);
+ }
+ Emit(opcode, 1, outputs, input_count, inputs);
+}
+
void InstructionSelector::VisitLoadTransform(Node* node) {
LoadTransformParameters params = LoadTransformParametersOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (params.transformation) {
- case LoadTransformation::kS8x16LoadSplat:
- opcode = kX64S8x16LoadSplat;
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kX64S128Load8Splat;
break;
- case LoadTransformation::kS16x8LoadSplat:
- opcode = kX64S16x8LoadSplat;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kX64S128Load16Splat;
break;
- case LoadTransformation::kS32x4LoadSplat:
- opcode = kX64S32x4LoadSplat;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kX64S128Load32Splat;
break;
- case LoadTransformation::kS64x2LoadSplat:
- opcode = kX64S64x2LoadSplat;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kX64S128Load64Splat;
break;
- case LoadTransformation::kI16x8Load8x8S:
- opcode = kX64I16x8Load8x8S;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kX64S128Load8x8S;
break;
- case LoadTransformation::kI16x8Load8x8U:
- opcode = kX64I16x8Load8x8U;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kX64S128Load8x8U;
break;
- case LoadTransformation::kI32x4Load16x4S:
- opcode = kX64I32x4Load16x4S;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kX64S128Load16x4S;
break;
- case LoadTransformation::kI32x4Load16x4U:
- opcode = kX64I32x4Load16x4U;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kX64S128Load16x4U;
break;
- case LoadTransformation::kI64x2Load32x2S:
- opcode = kX64I64x2Load32x2S;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kX64S128Load32x2S;
break;
- case LoadTransformation::kI64x2Load32x2U:
- opcode = kX64I64x2Load32x2U;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kX64S128Load32x2U;
break;
- case LoadTransformation::kS128LoadMem32Zero:
- opcode = kX64S128LoadMem32Zero;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kX64Movss;
break;
- case LoadTransformation::kS128LoadMem64Zero:
- opcode = kX64S128LoadMem64Zero;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kX64Movsd;
break;
default:
UNREACHABLE();
}
// x64 supports unaligned loads
- DCHECK_NE(params.kind, LoadKind::kUnaligned);
+ DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
InstructionCode code = opcode;
- if (params.kind == LoadKind::kProtected) {
+ if (params.kind == MemoryAccessKind::kProtected) {
code |= MiscField::encode(kMemoryAccessProtected);
}
VisitLoad(node, node, code);
@@ -486,6 +540,40 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitStoreLane(Node* node) {
+ X64OperandGenerator g(this);
+
+ StoreLaneParameters params = StoreLaneParametersOf(node->op());
+ InstructionCode opcode = kArchNop;
+ if (params.rep == MachineRepresentation::kWord8) {
+ opcode = kX64Pextrb;
+ } else if (params.rep == MachineRepresentation::kWord16) {
+ opcode = kX64Pextrw;
+ } else if (params.rep == MachineRepresentation::kWord32) {
+ opcode = kX64S128Store32Lane;
+ } else if (params.rep == MachineRepresentation::kWord64) {
+ opcode = kX64S128Store64Lane;
+ } else {
+ UNREACHABLE();
+ }
+
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ opcode |= AddressingModeField::encode(addressing_mode);
+
+ if (params.kind == MemoryAccessKind::kProtected) {
+ opcode |= MiscField::encode(kMemoryAccessProtected);
+ }
+
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(2));
+ inputs[input_count++] = value_operand;
+ inputs[input_count++] = g.UseImmediate(params.laneidx);
+ DCHECK_GE(4, input_count);
+ Emit(opcode, 0, nullptr, input_count, inputs);
+}
+
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
@@ -635,7 +723,7 @@ bool TryMergeTruncateInt64ToInt32IntoLoad(InstructionSelector* selector,
if (load->opcode() == IrOpcode::kLoad && selector->CanCover(node, load)) {
LoadRepresentation load_rep = LoadRepresentationOf(load->op());
MachineRepresentation rep = load_rep.representation();
- InstructionCode opcode = kArchNop;
+ InstructionCode opcode;
switch (rep) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
@@ -648,7 +736,7 @@ bool TryMergeTruncateInt64ToInt32IntoLoad(InstructionSelector* selector,
case MachineRepresentation::kWord64:
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTagged:
- case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
opcode = kX64Movl;
break;
default:
@@ -1035,12 +1123,14 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
// {EmitIdentity} reuses the virtual register of the first input
// for the output. This is exactly what we want here.
EmitIdentity(node);
- } else if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
+ } else if (m.right().HasResolvedValue() &&
+ g.CanBeImmediate(m.right().node())) {
// Turn subtractions of constant values into immediate "leal" instructions
// by negating the value.
- Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.TempImmediate(base::NegateWithWraparound(m.right().Value())));
+ Emit(
+ kX64Lea32 | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::NegateWithWraparound(m.right().ResolvedValue())));
} else {
VisitBinop(this, node, kX64Sub32);
}
@@ -1052,12 +1142,12 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
if (m.left().Is(0)) {
Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
} else {
- if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
+ if (m.right().HasResolvedValue() && g.CanBeImmediate(m.right().node())) {
// Turn subtractions of constant values into immediate "leaq" instructions
// by negating the value.
Emit(kX64Lea | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.TempImmediate(-static_cast<int32_t>(m.right().Value())));
+ g.TempImmediate(-static_cast<int32_t>(m.right().ResolvedValue())));
return;
}
VisitBinop(this, node, kX64Sub);
@@ -1269,7 +1359,7 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
MachineRepresentation rep = load_rep.representation();
- InstructionCode opcode = kArchNop;
+ InstructionCode opcode;
switch (rep) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
@@ -1283,7 +1373,6 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
size_t input_count = 0;
@@ -1401,14 +1490,60 @@ void VisitRRO(InstructionSelector* selector, Node* node,
}
void VisitFloatBinop(InstructionSelector* selector, Node* node,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ InstructionCode avx_opcode, InstructionCode sse_opcode) {
X64OperandGenerator g(selector);
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.Use(node->InputAt(1));
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ InstructionOperand inputs[8];
+ size_t input_count = 0;
+ InstructionOperand outputs[1];
+ size_t output_count = 0;
+
+ if (left == right) {
+ // If both inputs refer to the same operand, enforce allocating a register
+ // for both of them to ensure that we don't end up generating code like
+ // this:
+ //
+ // movss rax, [rbp-0x10]
+ // addss rax, [rbp-0x10]
+ // jo label
+ InstructionOperand const input = g.UseRegister(left);
+ inputs[input_count++] = input;
+ inputs[input_count++] = input;
+ } else {
+ int effect_level = selector->GetEffectLevel(node);
+ if (node->op()->HasProperty(Operator::kCommutative) &&
+ (g.CanBeBetterLeftOperand(right) ||
+ g.CanBeMemoryOperand(avx_opcode, node, left, effect_level)) &&
+ (!g.CanBeBetterLeftOperand(left) ||
+ !g.CanBeMemoryOperand(avx_opcode, node, right, effect_level))) {
+ std::swap(left, right);
+ }
+ if (g.CanBeMemoryOperand(avx_opcode, node, right, effect_level)) {
+ inputs[input_count++] = g.UseRegister(left);
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
+ avx_opcode |= AddressingModeField::encode(addressing_mode);
+ sse_opcode |= AddressingModeField::encode(addressing_mode);
+ } else {
+ inputs[input_count++] = g.UseRegister(left);
+ inputs[input_count++] = g.Use(right);
+ }
+ }
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
+ outputs[output_count++] = g.DefineAsRegister(node);
+ DCHECK_EQ(1u, output_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+ selector->Emit(avx_opcode, output_count, outputs, input_count, inputs);
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ DCHECK_EQ(1u, output_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+ selector->Emit(sse_opcode, output_count, outputs, input_count, inputs);
}
}
@@ -1902,8 +2037,8 @@ void VisitWord64EqualImpl(InstructionSelector* selector, Node* node,
const RootsTable& roots_table = selector->isolate()->roots_table();
RootIndex root_index;
HeapObjectBinopMatcher m(node);
- if (m.right().HasValue() &&
- roots_table.IsRootHandle(m.right().Value(), &root_index)) {
+ if (m.right().HasResolvedValue() &&
+ roots_table.IsRootHandle(m.right().ResolvedValue(), &root_index)) {
InstructionCode opcode =
kX64Cmp | AddressingModeField::encode(kMode_Root);
return VisitCompare(
@@ -1929,14 +2064,14 @@ void VisitWord32EqualImpl(InstructionSelector* selector, Node* node,
// present.
{
CompressedHeapObjectBinopMatcher m(node);
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
left = m.left().node();
- right = m.right().Value();
+ right = m.right().ResolvedValue();
} else {
HeapObjectBinopMatcher m2(node);
- if (m2.right().HasValue()) {
+ if (m2.right().HasResolvedValue()) {
left = m2.left().node();
- right = m2.right().Value();
+ right = m2.right().ResolvedValue();
}
}
}
@@ -2442,7 +2577,8 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
Float64Matcher mleft(left);
- if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
+ if (mleft.HasResolvedValue() &&
+ (bit_cast<uint64_t>(mleft.ResolvedValue()) >> 32) == 0u) {
Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
return;
}
@@ -2486,7 +2622,7 @@ void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kWord32AtomicExchangeInt8;
@@ -2505,7 +2641,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kX64Word64AtomicExchangeUint8;
@@ -2527,7 +2663,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2540,14 +2676,13 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Uint8()) {
opcode = kX64Word64AtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
@@ -2558,14 +2693,13 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
opcode = kX64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2578,14 +2712,13 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Uint8()) {
opcode = kX64Word64AtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
@@ -2596,7 +2729,6 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
opcode = kX64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
@@ -2605,7 +2737,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = int8_op;
} else if (type == MachineType::Uint8()) {
@@ -2618,7 +2750,6 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
opcode = word32_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
}
@@ -2641,7 +2772,7 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
ArchOpcode word64_op) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Uint8()) {
opcode = uint8_op;
} else if (type == MachineType::Uint16()) {
@@ -2652,7 +2783,6 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
opcode = word64_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
}
@@ -2670,96 +2800,87 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
-#define SIMD_TYPES(V) \
- V(F64x2) \
- V(F32x4) \
- V(I64x2) \
- V(I32x4) \
- V(I16x8) \
- V(I8x16)
+#define SIMD_BINOP_SSE_AVX_LIST(V) \
+ V(F64x2Add) \
+ V(F64x2Sub) \
+ V(F64x2Mul) \
+ V(F64x2Div) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Div) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Eq) \
+ V(I32x4Add) \
+ V(I32x4AddHoriz) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4MinS) \
+ V(I32x4MaxS) \
+ V(I32x4Eq) \
+ V(I32x4GtS) \
+ V(I32x4MinU) \
+ V(I32x4MaxU) \
+ V(I32x4DotI16x8S) \
+ V(I16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4) \
+ V(I16x8Add) \
+ V(I16x8AddSatS) \
+ V(I16x8AddHoriz) \
+ V(I16x8Sub) \
+ V(I16x8SubSatS) \
+ V(I16x8Mul) \
+ V(I16x8MinS) \
+ V(I16x8MaxS) \
+ V(I16x8Eq) \
+ V(I16x8GtS) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
+ V(I16x8MinU) \
+ V(I16x8MaxU) \
+ V(I16x8RoundingAverageU) \
+ V(I8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8) \
+ V(I8x16Add) \
+ V(I8x16AddSatS) \
+ V(I8x16Sub) \
+ V(I8x16SubSatS) \
+ V(I8x16MinS) \
+ V(I8x16MaxS) \
+ V(I8x16Eq) \
+ V(I8x16GtS) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
+ V(I8x16MinU) \
+ V(I8x16MaxU) \
+ V(I8x16RoundingAverageU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor)
#define SIMD_BINOP_LIST(V) \
- V(F64x2Add) \
- V(F64x2Sub) \
- V(F64x2Mul) \
- V(F64x2Div) \
V(F64x2Min) \
V(F64x2Max) \
- V(F64x2Eq) \
- V(F64x2Ne) \
- V(F64x2Lt) \
- V(F64x2Le) \
- V(F32x4Add) \
V(F32x4AddHoriz) \
- V(F32x4Sub) \
- V(F32x4Mul) \
- V(F32x4Div) \
V(F32x4Min) \
V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I64x2Add) \
- V(I64x2Sub) \
- V(I64x2Eq) \
- V(I64x2GtS) \
- V(I32x4Add) \
- V(I32x4AddHoriz) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4MinS) \
- V(I32x4MaxS) \
- V(I32x4Eq) \
- V(I32x4GtS) \
V(I32x4GeS) \
- V(I32x4MinU) \
- V(I32x4MaxU) \
V(I32x4GeU) \
- V(I32x4DotI16x8S) \
- V(I16x8SConvertI32x4) \
- V(I16x8Add) \
- V(I16x8AddSaturateS) \
- V(I16x8AddHoriz) \
- V(I16x8Sub) \
- V(I16x8SubSaturateS) \
- V(I16x8Mul) \
- V(I16x8MinS) \
- V(I16x8MaxS) \
- V(I16x8Eq) \
- V(I16x8GtS) \
V(I16x8GeS) \
- V(I16x8AddSaturateU) \
- V(I16x8SubSaturateU) \
- V(I16x8MinU) \
- V(I16x8MaxU) \
V(I16x8GeU) \
- V(I16x8RoundingAverageU) \
- V(I8x16SConvertI16x8) \
- V(I8x16Add) \
- V(I8x16AddSaturateS) \
- V(I8x16Sub) \
- V(I8x16SubSaturateS) \
- V(I8x16MinS) \
- V(I8x16MaxS) \
- V(I8x16Eq) \
- V(I8x16GtS) \
V(I8x16GeS) \
- V(I8x16AddSaturateU) \
- V(I8x16SubSaturateU) \
- V(I8x16MinU) \
- V(I8x16MaxU) \
- V(I8x16GeU) \
- V(I8x16RoundingAverageU) \
- V(S128And) \
- V(S128Or) \
- V(S128Xor)
+ V(I8x16GeU)
#define SIMD_BINOP_ONE_TEMP_LIST(V) \
- V(I64x2Ne) \
- V(I64x2GeS) \
- V(I64x2GtU) \
- V(I64x2GeU) \
V(I32x4Ne) \
V(I32x4GtU) \
V(I16x8Ne) \
@@ -2776,6 +2897,7 @@ VISIT_ATOMIC_BINOP(Xor)
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(I64x2Neg) \
+ V(I64x2BitMask) \
V(I32x4SConvertI16x8Low) \
V(I32x4SConvertI16x8High) \
V(I32x4Neg) \
@@ -2809,13 +2931,11 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16ShrU)
#define SIMD_ANYTRUE_LIST(V) \
- V(V64x2AnyTrue) \
V(V32x4AnyTrue) \
V(V16x8AnyTrue) \
V(V8x16AnyTrue)
#define SIMD_ALLTRUE_LIST(V) \
- V(V64x2AllTrue) \
V(V32x4AllTrue) \
V(V16x8AllTrue) \
V(V8x16AllTrue)
@@ -2845,56 +2965,97 @@ void InstructionSelector::VisitS128Zero(Node* node) {
Emit(kX64S128Zero, g.DefineAsRegister(node));
}
-#define VISIT_SIMD_SPLAT(Type) \
- void InstructionSelector::Visit##Type##Splat(Node* node) { \
- X64OperandGenerator g(this); \
- Emit(kX64##Type##Splat, g.DefineAsRegister(node), \
- g.Use(node->InputAt(0))); \
+#define SIMD_TYPES_FOR_SPLAT(V) \
+ V(I64x2) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+// Splat with an optimization for const 0.
+#define VISIT_SIMD_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ X64OperandGenerator g(this); \
+ Node* input = node->InputAt(0); \
+ if (g.CanBeImmediate(input) && g.GetImmediateIntegerValue(input) == 0) { \
+ Emit(kX64S128Zero, g.DefineAsRegister(node)); \
+ } else { \
+ Emit(kX64##Type##Splat, g.DefineAsRegister(node), g.Use(input)); \
+ } \
}
-SIMD_TYPES(VISIT_SIMD_SPLAT)
+SIMD_TYPES_FOR_SPLAT(VISIT_SIMD_SPLAT)
#undef VISIT_SIMD_SPLAT
+#undef SIMD_TYPES_FOR_SPLAT
-#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
- void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
- X64OperandGenerator g(this); \
- int32_t lane = OpParameter<int32_t>(node->op()); \
- Emit(kX64##Type##ExtractLane##Sign, g.DefineAsRegister(node), \
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
- }
-SIMD_VISIT_EXTRACT_LANE(F64x2, )
-SIMD_VISIT_EXTRACT_LANE(F32x4, )
-SIMD_VISIT_EXTRACT_LANE(I64x2, )
-SIMD_VISIT_EXTRACT_LANE(I32x4, )
-SIMD_VISIT_EXTRACT_LANE(I16x8, U)
-SIMD_VISIT_EXTRACT_LANE(I16x8, S)
-SIMD_VISIT_EXTRACT_LANE(I8x16, U)
-SIMD_VISIT_EXTRACT_LANE(I8x16, S)
+void InstructionSelector::VisitF64x2Splat(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64F64x2Splat, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitF32x4Splat(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kX64F32x4Splat, dst, g.UseRegister(node->InputAt(0)));
+}
+
+#define SIMD_VISIT_EXTRACT_LANE(Type, Sign, Op) \
+ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
+ X64OperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
+ Emit(kX64##Op, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), \
+ g.UseImmediate(lane)); \
+ }
+SIMD_VISIT_EXTRACT_LANE(F64x2, , F64x2ExtractLane)
+SIMD_VISIT_EXTRACT_LANE(F32x4, , F32x4ExtractLane)
+SIMD_VISIT_EXTRACT_LANE(I64x2, , I64x2ExtractLane)
+SIMD_VISIT_EXTRACT_LANE(I32x4, , I32x4ExtractLane)
+SIMD_VISIT_EXTRACT_LANE(I16x8, S, I16x8ExtractLaneS)
+SIMD_VISIT_EXTRACT_LANE(I16x8, U, Pextrw)
+SIMD_VISIT_EXTRACT_LANE(I8x16, S, I8x16ExtractLaneS)
+SIMD_VISIT_EXTRACT_LANE(I8x16, U, Pextrb)
#undef SIMD_VISIT_EXTRACT_LANE
-#define VISIT_SIMD_REPLACE_LANE(Type) \
- void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
- X64OperandGenerator g(this); \
- int32_t lane = OpParameter<int32_t>(node->op()); \
- Emit(kX64##Type##ReplaceLane, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane), \
- g.Use(node->InputAt(1))); \
+void InstructionSelector::VisitF32x4ReplaceLane(Node* node) {
+ X64OperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node->op());
+ Emit(kX64F32x4ReplaceLane, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
+ g.Use(node->InputAt(1)));
+}
+
+#define VISIT_SIMD_REPLACE_LANE(TYPE, OPCODE) \
+ void InstructionSelector::Visit##TYPE##ReplaceLane(Node* node) { \
+ X64OperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
+ Emit(OPCODE, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), \
+ g.UseImmediate(lane), g.Use(node->InputAt(1))); \
}
-SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
+
+#define SIMD_TYPES_FOR_REPLACE_LANE(V) \
+ V(F64x2, kX64Pinsrq) \
+ V(I64x2, kX64Pinsrq) \
+ V(I32x4, kX64Pinsrd) \
+ V(I16x8, kX64Pinsrw) \
+ V(I8x16, kX64Pinsrb)
+
+SIMD_TYPES_FOR_REPLACE_LANE(VISIT_SIMD_REPLACE_LANE)
+#undef SIMD_TYPES_FOR_REPLACE_LANE
#undef VISIT_SIMD_REPLACE_LANE
-#define VISIT_SIMD_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- if (g.CanBeImmediate(node->InputAt(1))) { \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1))); \
- } else { \
- InstructionOperand temps[] = {g.TempSimd128Register(), \
- g.TempRegister()}; \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseUniqueRegister(node->InputAt(0)), \
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
- } \
+#define VISIT_SIMD_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ InstructionOperand dst = IsSupported(AVX) ? g.DefineAsRegister(node) \
+ : g.DefineSameAsFirst(node); \
+ if (g.CanBeImmediate(node->InputAt(1))) { \
+ Emit(kX64##Opcode, dst, g.UseRegister(node->InputAt(0)), \
+ g.UseImmediate(node->InputAt(1))); \
+ } else { \
+ InstructionOperand temps[] = {g.TempSimd128Register(), \
+ g.TempRegister()}; \
+ Emit(kX64##Opcode, dst, g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
+ } \
}
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
@@ -2938,6 +3099,21 @@ SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
+#define VISIT_SIMD_BINOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ if (IsSupported(AVX)) { \
+ Emit(kX64##Opcode, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
+ } else { \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
+ } \
+ }
+SIMD_BINOP_SSE_AVX_LIST(VISIT_SIMD_BINOP)
+#undef VISIT_SIMD_BINOP
+#undef SIMD_BINOP_SSE_AVX_LIST
+
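
The VISIT_SIMD_BINOP macro above encodes the difference between the two encodings: AVX's three-operand form may write to any destination register, while the legacy SSE forms are destructive and overwrite their first source, hence DefineAsRegister versus DefineSameAsFirst. For illustration only, this is roughly what the macro expands to for the F32x4Add entry of SIMD_BINOP_SSE_AVX_LIST; it is not additional code in the patch:

void InstructionSelector::VisitF32x4Add(Node* node) {
  X64OperandGenerator g(this);
  if (IsSupported(AVX)) {
    // vaddps dst, src1, src2: the destination may be a fresh register.
    Emit(kX64F32x4Add, g.DefineAsRegister(node),
         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
  } else {
    // addps dst, src: the destination must alias the first input.
    Emit(kX64F32x4Add, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
  }
}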
#define VISIT_SIMD_BINOP_ONE_TEMP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
X64OperandGenerator g(this); \
@@ -2970,7 +3146,6 @@ SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE)
#undef VISIT_SIMD_ALLTRUE
#undef SIMD_ALLTRUE_LIST
-#undef SIMD_TYPES
void InstructionSelector::VisitS128Select(Node* node) {
X64OperandGenerator g(this);
@@ -2979,6 +3154,40 @@ void InstructionSelector::VisitS128Select(Node* node) {
g.UseRegister(node->InputAt(2)));
}
+namespace {
+void VisitSignSelect(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ X64OperandGenerator g(selector);
+ // signselect(x, y, -1) = x
+ // pblendvb(dst, x, y, -1) = dst <- y, so we need to swap x and y.
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(
+ opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(2)));
+ } else {
+ selector->Emit(
+ opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(2), xmm0));
+ }
+}
+} // namespace
+
+void InstructionSelector::VisitI8x16SignSelect(Node* node) {
+ VisitSignSelect(this, node, kX64I8x16SignSelect);
+}
+
+void InstructionSelector::VisitI16x8SignSelect(Node* node) {
+ VisitSignSelect(this, node, kX64I16x8SignSelect);
+}
+
+void InstructionSelector::VisitI32x4SignSelect(Node* node) {
+ VisitSignSelect(this, node, kX64I32x4SignSelect);
+}
+
+void InstructionSelector::VisitI64x2SignSelect(Node* node) {
+ VisitSignSelect(this, node, kX64I64x2SignSelect);
+}
+
void InstructionSelector::VisitS128AndNot(Node* node) {
X64OperandGenerator g(this);
// andnps a b does ~a & b, but we want a & !b, so flip the input.
@@ -3045,48 +3254,6 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI64x2MinS(Node* node) {
- X64OperandGenerator g(this);
- if (this->IsSupported(SSE4_2)) {
- InstructionOperand temps[] = {g.TempFpRegister(xmm0)};
- Emit(kX64I64x2MinS, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
- } else {
- InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister(),
- g.TempRegister()};
- Emit(kX64I64x2MinS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- arraysize(temps), temps);
- }
-}
-
-void InstructionSelector::VisitI64x2MaxS(Node* node) {
- X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempFpRegister(xmm0)};
- Emit(kX64I64x2MaxS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- arraysize(temps), temps);
-}
-
-void InstructionSelector::VisitI64x2MinU(Node* node) {
- X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempFpRegister(xmm0)};
- Emit(kX64I64x2MinU, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
-}
-
-void InstructionSelector::VisitI64x2MaxU(Node* node) {
- X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempFpRegister(xmm0)};
- Emit(kX64I64x2MaxU, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- arraysize(temps), temps);
-}
-
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
@@ -3102,12 +3269,6 @@ void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64I16x8UConvertI32x4, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
-}
-
void InstructionSelector::VisitI16x8BitMask(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
@@ -3115,12 +3276,6 @@ void InstructionSelector::VisitI16x8BitMask(Node* node) {
g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64I8x16UConvertI16x8, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
-}
-
void InstructionSelector::VisitI8x16Mul(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index 32c5168466..d7eaa137f1 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -100,6 +100,8 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
public:
BytecodeAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone,
BailoutId osr_bailout_id, bool analyze_liveness);
+ BytecodeAnalysis(const BytecodeAnalysis&) = delete;
+ BytecodeAnalysis& operator=(const BytecodeAnalysis&) = delete;
// Return true if the given offset is a loop header
bool IsLoopHeader(int offset) const;
@@ -166,8 +168,6 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
ZoneMap<int, LoopInfo> header_to_info_;
int osr_entry_point_;
BytecodeLivenessMap liveness_map_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeAnalysis);
};
} // namespace compiler
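
This header and several that follow swap the DISALLOW_COPY_AND_ASSIGN macro (from src/base/macros.h) for explicitly deleted copy operations declared next to the constructors. A minimal sketch of the idiom; the class name and member are placeholders, not code from this patch:

// Old idiom: a macro in the private section, usually at the end of the
// class, expanding to private copy declarations.
class OldStyle {
 public:
  explicit OldStyle(int count) : count_(count) {}

 private:
  int count_;
  DISALLOW_COPY_AND_ASSIGN(OldStyle);
};

// New idiom: the deleted members are spelled out next to the constructors,
// so the class interface is visible without expanding a macro.
class NewStyle {
 public:
  explicit NewStyle(int count) : count_(count) {}
  NewStyle(const NewStyle&) = delete;
  NewStyle& operator=(const NewStyle&) = delete;

 private:
  int count_;
};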
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 7855bc4c44..14d014bca6 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -35,13 +35,16 @@ class BytecodeGraphBuilder {
BytecodeGraphBuilder(JSHeapBroker* broker, Zone* local_zone,
NativeContextRef const& native_context,
SharedFunctionInfoRef const& shared_info,
- FeedbackVectorRef const& feedback_vector,
+ FeedbackCellRef const& feedback_cell,
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions, int inlining_id,
CodeKind code_kind, BytecodeGraphBuilderFlags flags,
TickCounter* tick_counter);
+ BytecodeGraphBuilder(const BytecodeGraphBuilder&) = delete;
+ BytecodeGraphBuilder& operator=(const BytecodeGraphBuilder&) = delete;
+
// Creates a graph by visiting bytecodes.
void CreateGraph();
@@ -67,6 +70,7 @@ class BytecodeGraphBuilder {
bool native_context_independent() const {
return CodeKindIsNativeContextIndependentJSFunction(code_kind_);
}
+ bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
bool generate_full_feedback_collection() const {
// NCI code currently collects full feedback.
DCHECK_IMPLIES(native_context_independent(),
@@ -117,10 +121,16 @@ class BytecodeGraphBuilder {
// Checks the optimization marker and potentially triggers compilation or
// installs the finished code object.
- // Only relevant for specific code kinds (see
- // CodeKindChecksOptimizationMarker).
+ // Only relevant for specific code kinds (see CodeKindCanTierUp).
void MaybeBuildTierUpCheck();
+ // Like bytecode, NCI code must collect call feedback to preserve proper
+ // behavior of inlining heuristics when tiering up to Turbofan in the future.
+ // The invocation count (how often a particular JSFunction has been called)
+ // is tracked by the callee. For bytecode, this happens in the
+ // InterpreterEntryTrampoline; for NCI code it happens here in the prologue.
+ void MaybeBuildIncrementInvocationCount();
+
// Builder for loading a native context field.
Node* BuildLoadNativeContextField(int index);
@@ -254,7 +264,7 @@ class BytecodeGraphBuilder {
const Operator* op, Node* receiver, FeedbackSlot load_slot,
FeedbackSlot call_slot);
JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadNamed(
- const Operator* op, Node* receiver, FeedbackSlot slot);
+ const Operator* op, FeedbackSlot slot);
JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadKeyed(
const Operator* op, Node* receiver, Node* key, FeedbackSlot slot);
JSTypeHintLowering::LoweringResult TryBuildSimplifiedStoreNamed(
@@ -277,7 +287,7 @@ class BytecodeGraphBuilder {
uint32_t depth);
// Helper function to create for-in mode from the recorded type feedback.
- ForInMode GetForInMode(int operand_index);
+ ForInMode GetForInMode(FeedbackSlot slot);
// Helper function to compute call frequency from the recorded type
// feedback. Returns unknown if invocation count is unknown. Returns 0 if
@@ -415,6 +425,7 @@ class BytecodeGraphBuilder {
// The native context for which we optimize.
NativeContextRef const native_context_;
SharedFunctionInfoRef const shared_info_;
+ FeedbackCellRef const feedback_cell_;
FeedbackVectorRef const feedback_vector_;
CallFrequency const invocation_frequency_;
JSTypeHintLowering const type_hint_lowering_;
@@ -480,8 +491,6 @@ class BytecodeGraphBuilder {
static constexpr int kCompareOperationHintIndex = 1;
static constexpr int kCountOperationHintIndex = 0;
static constexpr int kUnaryOperationHintIndex = 0;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
};
// The abstract execution environment simulates the content of the interpreter
@@ -980,7 +989,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
JSHeapBroker* broker, Zone* local_zone,
NativeContextRef const& native_context,
SharedFunctionInfoRef const& shared_info,
- FeedbackVectorRef const& feedback_vector, BailoutId osr_offset,
+ FeedbackCellRef const& feedback_cell, BailoutId osr_offset,
JSGraph* jsgraph, CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions, int inlining_id, CodeKind code_kind,
BytecodeGraphBuilderFlags flags, TickCounter* tick_counter)
@@ -989,10 +998,11 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
jsgraph_(jsgraph),
native_context_(native_context),
shared_info_(shared_info),
- feedback_vector_(feedback_vector),
+ feedback_cell_(feedback_cell),
+ feedback_vector_(feedback_cell.value().AsFeedbackVector()),
invocation_frequency_(invocation_frequency),
type_hint_lowering_(
- broker, jsgraph, feedback_vector,
+ broker, jsgraph, feedback_vector_,
(flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized)
? JSTypeHintLowering::kBailoutOnUninitialized
: JSTypeHintLowering::kNoFlags),
@@ -1046,23 +1056,17 @@ void BytecodeGraphBuilder::CreateFeedbackCellNode() {
DCHECK_NULL(feedback_cell_node_);
if (native_context_independent()) {
feedback_cell_node_ = BuildLoadFeedbackCell();
+ } else if (is_turboprop()) {
+ feedback_cell_node_ = jsgraph()->Constant(feedback_cell_);
}
}
Node* BytecodeGraphBuilder::BuildLoadFeedbackCell() {
DCHECK(native_context_independent());
DCHECK_NULL(feedback_cell_node_);
-
- Environment* env = environment();
- Node* control = env->GetControlDependency();
- Node* effect = env->GetEffectDependency();
-
- Node* feedback_cell = effect = graph()->NewNode(
+ return NewNode(
simplified()->LoadField(AccessBuilder::ForJSFunctionFeedbackCell()),
- GetFunctionClosure(), effect, control);
-
- env->UpdateEffectDependency(effect);
- return feedback_cell;
+ GetFunctionClosure());
}
void BytecodeGraphBuilder::CreateFeedbackVectorNode() {
@@ -1079,38 +1083,22 @@ Node* BytecodeGraphBuilder::BuildLoadFeedbackVector() {
// The feedback vector must exist and remain live while the generated code
// lives. Specifically, that means it must be created when NCI code is
// installed, and must not be flushed.
-
- Environment* env = environment();
- Node* control = env->GetControlDependency();
- Node* effect = env->GetEffectDependency();
-
- Node* vector = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFeedbackCellValue()),
- feedback_cell_node(), effect, control);
-
- env->UpdateEffectDependency(effect);
- return vector;
+ return NewNode(simplified()->LoadField(AccessBuilder::ForFeedbackCellValue()),
+ feedback_cell_node());
}
Node* BytecodeGraphBuilder::BuildLoadFeedbackCell(int index) {
if (native_context_independent()) {
- Environment* env = environment();
- Node* control = env->GetControlDependency();
- Node* effect = env->GetEffectDependency();
-
// TODO(jgruber,v8:8888): Assumes that the feedback vector has been
// allocated.
- Node* closure_feedback_cell_array = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForFeedbackVectorClosureFeedbackCellArray()),
- feedback_vector_node(), effect, control);
+ Node* closure_feedback_cell_array =
+ NewNode(simplified()->LoadField(
+ AccessBuilder::ForFeedbackVectorClosureFeedbackCellArray()),
+ feedback_vector_node());
- Node* feedback_cell = effect = graph()->NewNode(
+ return NewNode(
simplified()->LoadField(AccessBuilder::ForFixedArraySlot(index)),
- closure_feedback_cell_array, effect, control);
-
- env->UpdateEffectDependency(effect);
- return feedback_cell;
+ closure_feedback_cell_array);
} else {
return jsgraph()->Constant(feedback_vector().GetClosureFeedbackCell(index));
}
@@ -1126,34 +1114,49 @@ void BytecodeGraphBuilder::CreateNativeContextNode() {
Node* BytecodeGraphBuilder::BuildLoadNativeContext() {
DCHECK(native_context_independent());
DCHECK_NULL(native_context_node_);
-
- Environment* env = environment();
- Node* control = env->GetControlDependency();
- Node* effect = env->GetEffectDependency();
- Node* context = env->Context();
-
- Node* context_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- context, effect, control);
- Node* native_context = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapNativeContext()),
- context_map, effect, control);
-
- env->UpdateEffectDependency(effect);
- return native_context;
+ Node* context_map = NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ environment()->Context());
+ return NewNode(simplified()->LoadField(AccessBuilder::ForMapNativeContext()),
+ context_map);
}
void BytecodeGraphBuilder::MaybeBuildTierUpCheck() {
- if (!CodeKindChecksOptimizationMarker(code_kind())) return;
+ // For OSR we don't tier up, so we don't need to build this check. Also,
+ // tiering up currently tail calls to IET, and tail calls aren't supported
+ // with OSR. See AdjustStackPointerForTailCall.
+ if (!CodeKindCanTierUp(code_kind()) || osr_) return;
+
+ int parameter_count = bytecode_array().parameter_count();
+ Node* target = GetFunctionClosure();
+ Node* new_target = graph()->NewNode(
+ common()->Parameter(
+ Linkage::GetJSCallNewTargetParamIndex(parameter_count),
+ "%new.target"),
+ graph()->start());
+ Node* argc = graph()->NewNode(
+ common()->Parameter(Linkage::GetJSCallArgCountParamIndex(parameter_count),
+ "%argc"),
+ graph()->start());
+ DCHECK_EQ(environment()->Context()->opcode(), IrOpcode::kParameter);
+ Node* context = environment()->Context();
- Environment* env = environment();
- Node* control = env->GetControlDependency();
- Node* effect = env->GetEffectDependency();
+ NewNode(simplified()->TierUpCheck(), feedback_vector_node(), target,
+ new_target, argc, context);
+}
- effect = graph()->NewNode(simplified()->TierUpCheck(), feedback_vector_node(),
- effect, control);
+void BytecodeGraphBuilder::MaybeBuildIncrementInvocationCount() {
+ if (!generate_full_feedback_collection()) return;
- env->UpdateEffectDependency(effect);
+ Node* current_invocation_count =
+ NewNode(simplified()->LoadField(
+ AccessBuilder::ForFeedbackVectorInvocationCount()),
+ feedback_vector_node());
+ Node* next_invocation_count =
+ NewNode(simplified()->NumberAdd(), current_invocation_count,
+ jsgraph()->SmiConstant(1));
+ NewNode(simplified()->StoreField(
+ AccessBuilder::ForFeedbackVectorInvocationCount()),
+ feedback_vector_node(), next_invocation_count);
}
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
@@ -1190,6 +1193,7 @@ void BytecodeGraphBuilder::CreateGraph() {
CreateFeedbackCellNode();
CreateFeedbackVectorNode();
MaybeBuildTierUpCheck();
+ MaybeBuildIncrementInvocationCount();
CreateNativeContextNode();
VisitBytecodes();
@@ -1828,7 +1832,7 @@ BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions(
// in the same scope as the variable itself has no way of shadowing it.
Environment* slow_environment = nullptr;
for (uint32_t d = 0; d < depth; d++) {
- if (scope_info.HasContextExtension()) {
+ if (scope_info.HasContextExtensionSlot()) {
slow_environment = CheckContextExtensionAtDepth(slow_environment, d);
}
DCHECK_IMPLIES(!scope_info.HasOuterScopeInfo(), d + 1 == depth);
@@ -2015,7 +2019,7 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
const Operator* op = javascript()->LoadNamed(name.object(), feedback);
JSTypeHintLowering::LoweringResult lowering =
- TryBuildSimplifiedLoadNamed(op, object, feedback.slot);
+ TryBuildSimplifiedLoadNamed(op, feedback.slot);
if (lowering.IsExit()) return;
Node* node = nullptr;
@@ -2048,10 +2052,24 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyFromSuper() {
Node* home_object = environment()->LookupAccumulator();
NameRef name(broker(),
bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
- const Operator* op = javascript()->LoadNamedFromSuper(name.object());
- // TODO(marja, v8:9237): Use lowering.
- Node* node = NewNode(op, receiver, home_object);
+ FeedbackSource feedback =
+ CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
+ const Operator* op =
+ javascript()->LoadNamedFromSuper(name.object(), feedback);
+
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedLoadNamed(op, feedback.slot);
+ if (lowering.IsExit()) return;
+
+ Node* node = nullptr;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
+ } else {
+ DCHECK(!lowering.Changed());
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
+ node = NewNode(op, receiver, home_object, feedback_vector_node());
+ }
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2903,6 +2921,31 @@ void BytecodeGraphBuilder::VisitThrowSuperAlreadyCalledIfNotHole() {
Runtime::kThrowSuperAlreadyCalledError);
}
+void BytecodeGraphBuilder::VisitThrowIfNotSuperConstructor() {
+ Node* constructor =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* check_is_constructor =
+ NewNode(simplified()->ObjectIsConstructor(), constructor);
+ NewBranch(check_is_constructor, BranchHint::kTrue);
+ {
+ SubEnvironment sub_environment(this);
+ NewIfFalse();
+ BuildLoopExitsForFunctionExit(bytecode_analysis().GetInLivenessFor(
+ bytecode_iterator().current_offset()));
+ Node* node =
+ NewNode(javascript()->CallRuntime(Runtime::kThrowNotSuperConstructor),
+ constructor, GetFunctionClosure());
+ environment()->RecordAfterState(node, Environment::kAttachFrameState);
+ Node* control = NewNode(common()->Throw());
+ MergeControlToLeaveFunction(control);
+ }
+ NewIfTrue();
+
+ constructor = NewNode(common()->TypeGuard(Type::Callable()), constructor);
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0),
+ constructor);
+}
+
void BytecodeGraphBuilder::BuildUnaryOp(const Operator* op) {
DCHECK(JSOperator::IsUnaryWithFeedback(op->opcode()));
PrepareEagerCheckpoint();
@@ -2952,8 +2995,7 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
}
// Helper function to create for-in mode from the recorded type feedback.
-ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
- FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
+ForInMode BytecodeGraphBuilder::GetForInMode(FeedbackSlot slot) {
FeedbackSource source(feedback_vector(), slot);
switch (broker()->GetFeedbackForForIn(source)) {
case ForInHint::kNone:
@@ -3610,7 +3652,9 @@ void BytecodeGraphBuilder::VisitForInPrepare() {
TryBuildSimplifiedForInPrepare(enumerator, slot);
if (lowering.IsExit()) return;
DCHECK(!lowering.Changed());
- Node* node = NewNode(javascript()->ForInPrepare(GetForInMode(1)), enumerator);
+ FeedbackSource feedback = CreateFeedbackSource(slot);
+ Node* node = NewNode(javascript()->ForInPrepare(GetForInMode(slot), feedback),
+ enumerator, feedback_vector_node());
environment()->BindRegistersToProjections(
bytecode_iterator().GetRegisterOperand(0), node);
}
@@ -3639,12 +3683,9 @@ void BytecodeGraphBuilder::VisitForInNext() {
Node* cache_array = environment()->LookupRegister(
interpreter::Register(catch_reg_pair_index + 1));
- // We need to rename the {index} here, as in case of OSR we loose the
+ // We need to rename the {index} here, as in case of OSR we lose the
// information that the {index} is always a valid unsigned Smi value.
- index = graph()->NewNode(common()->TypeGuard(Type::UnsignedSmall()), index,
- environment()->GetEffectDependency(),
- environment()->GetControlDependency());
- environment()->UpdateEffectDependency(index);
+ index = NewNode(common()->TypeGuard(Type::UnsignedSmall()), index);
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(3);
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedForInNext(
@@ -3652,8 +3693,10 @@ void BytecodeGraphBuilder::VisitForInNext() {
if (lowering.IsExit()) return;
DCHECK(!lowering.Changed());
- Node* node = NewNode(javascript()->ForInNext(GetForInMode(3)), receiver,
- cache_array, cache_type, index);
+ FeedbackSource feedback = CreateFeedbackSource(slot);
+ Node* node =
+ NewNode(javascript()->ForInNext(GetForInMode(slot), feedback), receiver,
+ cache_array, cache_type, index, feedback_vector_node());
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -4071,13 +4114,13 @@ void BytecodeGraphBuilder::BuildJumpIfJSReceiver() {
}
void BytecodeGraphBuilder::BuildUpdateInterruptBudget(int delta) {
- if (native_context_independent()) {
- // Keep uses of this in sync with Ignition's UpdateInterruptBudget.
- int delta_with_current_bytecode =
- delta - bytecode_iterator().current_bytecode_size();
- NewNode(simplified()->UpdateInterruptBudget(delta_with_current_bytecode),
- feedback_cell_node());
- }
+ if (!CodeKindCanTierUp(code_kind())) return;
+
+ // Keep uses of this in sync with Ignition's UpdateInterruptBudget.
+ int delta_with_current_bytecode =
+ delta - bytecode_iterator().current_bytecode_size();
+ NewNode(simplified()->UpdateInterruptBudget(delta_with_current_bytecode),
+ feedback_cell_node());
}
JSTypeHintLowering::LoweringResult
@@ -4193,14 +4236,12 @@ BytecodeGraphBuilder::TryBuildSimplifiedGetIterator(const Operator* op,
JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op,
- Node* receiver,
FeedbackSlot slot) {
if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult early_reduction =
- type_hint_lowering().ReduceLoadNamedOperation(op, receiver, effect,
- control, slot);
+ type_hint_lowering().ReduceLoadNamedOperation(op, effect, control, slot);
ApplyEarlyReduction(early_reduction);
return early_reduction;
}
@@ -4472,17 +4513,18 @@ void BytecodeGraphBuilder::UpdateSourcePosition(int offset) {
void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
SharedFunctionInfoRef const& shared_info,
- FeedbackVectorRef const& feedback_vector,
+ FeedbackCellRef const& feedback_cell,
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
int inlining_id, CodeKind code_kind,
BytecodeGraphBuilderFlags flags,
TickCounter* tick_counter) {
- DCHECK(broker->IsSerializedForCompilation(shared_info, feedback_vector));
+ DCHECK(broker->IsSerializedForCompilation(
+ shared_info, feedback_cell.value().AsFeedbackVector()));
BytecodeGraphBuilder builder(
broker, local_zone, broker->target_native_context(), shared_info,
- feedback_vector, osr_offset, jsgraph, invocation_frequency,
+ feedback_cell, osr_offset, jsgraph, invocation_frequency,
source_positions, inlining_id, code_kind, flags, tick_counter);
builder.CreateGraph();
}
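
The hunks above in bytecode-graph-builder.cc repeatedly collapse hand-rolled effect/control threading into the builder's NewNode helper, which appends the current effect and control inputs and updates the environment's effect chain itself. A sketch of the equivalence for a single LoadField-style operation, where op and input stand in for the operators and nodes used at the real call sites:

// Removed shape: thread effect and control by hand.
Environment* env = environment();
Node* control = env->GetControlDependency();
Node* effect = env->GetEffectDependency();
Node* result = effect = graph()->NewNode(op, input, effect, control);
env->UpdateEffectDependency(effect);
return result;

// Added shape: NewNode performs the same bookkeeping internally.
return NewNode(op, input);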
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index a8423904f8..501451ec55 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -41,7 +41,7 @@ using BytecodeGraphBuilderFlags = base::Flags<BytecodeGraphBuilderFlag>;
// on AIX (v8:8193).
void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
SharedFunctionInfoRef const& shared_info,
- FeedbackVectorRef const& feedback_vector,
+ FeedbackCellRef const& feedback_cell,
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
diff --git a/deps/v8/src/compiler/bytecode-liveness-map.h b/deps/v8/src/compiler/bytecode-liveness-map.h
index b377b55ecb..c68492d8bf 100644
--- a/deps/v8/src/compiler/bytecode-liveness-map.h
+++ b/deps/v8/src/compiler/bytecode-liveness-map.h
@@ -20,6 +20,8 @@ class BytecodeLivenessState : public ZoneObject {
public:
BytecodeLivenessState(int register_count, Zone* zone)
: bit_vector_(register_count + 1, zone) {}
+ BytecodeLivenessState(const BytecodeLivenessState&) = delete;
+ BytecodeLivenessState& operator=(const BytecodeLivenessState&) = delete;
const BitVector& bit_vector() const { return bit_vector_; }
@@ -71,8 +73,6 @@ class BytecodeLivenessState : public ZoneObject {
private:
BitVector bit_vector_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeLivenessState);
};
struct BytecodeLiveness {
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 5b395067f0..2c5338b0d7 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -4,9 +4,8 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
-
+#include "src/compiler/globals.h"
#include "src/compiler/linkage.h"
-
#include "src/zone/zone.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 273058ba25..185a2d0670 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -318,17 +318,18 @@ TNode<Float64T> CodeAssembler::Float64Constant(double value) {
bool CodeAssembler::ToInt32Constant(Node* node, int32_t* out_value) {
{
Int64Matcher m(node);
- if (m.HasValue() && m.IsInRange(std::numeric_limits<int32_t>::min(),
- std::numeric_limits<int32_t>::max())) {
- *out_value = static_cast<int32_t>(m.Value());
+ if (m.HasResolvedValue() &&
+ m.IsInRange(std::numeric_limits<int32_t>::min(),
+ std::numeric_limits<int32_t>::max())) {
+ *out_value = static_cast<int32_t>(m.ResolvedValue());
return true;
}
}
{
Int32Matcher m(node);
- if (m.HasValue()) {
- *out_value = m.Value();
+ if (m.HasResolvedValue()) {
+ *out_value = m.ResolvedValue();
return true;
}
}
@@ -338,8 +339,8 @@ bool CodeAssembler::ToInt32Constant(Node* node, int32_t* out_value) {
bool CodeAssembler::ToInt64Constant(Node* node, int64_t* out_value) {
Int64Matcher m(node);
- if (m.HasValue()) *out_value = m.Value();
- return m.HasValue();
+ if (m.HasResolvedValue()) *out_value = m.ResolvedValue();
+ return m.HasResolvedValue();
}
bool CodeAssembler::ToSmiConstant(Node* node, Smi* out_value) {
@@ -347,8 +348,8 @@ bool CodeAssembler::ToSmiConstant(Node* node, Smi* out_value) {
node = node->InputAt(0);
}
IntPtrMatcher m(node);
- if (m.HasValue()) {
- intptr_t value = m.Value();
+ if (m.HasResolvedValue()) {
+ intptr_t value = m.ResolvedValue();
// Make sure that the value is actually a smi
CHECK_EQ(0, value & ((static_cast<intptr_t>(1) << kSmiShiftSize) - 1));
*out_value = Smi(static_cast<Address>(value));
@@ -363,8 +364,8 @@ bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t* out_value) {
node = node->InputAt(0);
}
IntPtrMatcher m(node);
- if (m.HasValue()) *out_value = m.Value();
- return m.HasValue();
+ if (m.HasResolvedValue()) *out_value = m.ResolvedValue();
+ return m.HasResolvedValue();
}
bool CodeAssembler::IsUndefinedConstant(TNode<Object> node) {
@@ -377,7 +378,7 @@ bool CodeAssembler::IsNullConstant(TNode<Object> node) {
return m.Is(isolate()->factory()->null_value());
}
-Node* CodeAssembler::Parameter(int index) {
+Node* CodeAssembler::UntypedParameter(int index) {
if (index == kTargetParameterIndex) return raw_assembler()->TargetParameter();
return raw_assembler()->Parameter(index);
}
@@ -390,8 +391,8 @@ bool CodeAssembler::IsJSFunctionCall() const {
TNode<Context> CodeAssembler::GetJSContextParameter() {
auto call_descriptor = raw_assembler()->call_descriptor();
DCHECK(call_descriptor->IsJSFunctionCall());
- return CAST(Parameter(Linkage::GetJSCallContextParamIndex(
- static_cast<int>(call_descriptor->JSParameterCount()))));
+ return Parameter<Context>(Linkage::GetJSCallContextParamIndex(
+ static_cast<int>(call_descriptor->JSParameterCount())));
}
void CodeAssembler::Return(TNode<Object> value) {
@@ -802,19 +803,21 @@ Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
return raw_assembler()->AtomicStore(rep, base, offset, value, value_high);
}
-#define ATOMIC_FUNCTION(name) \
- Node* CodeAssembler::Atomic##name(MachineType type, Node* base, \
- Node* offset, Node* value, \
- Node* value_high) { \
- return raw_assembler()->Atomic##name(type, base, offset, value, \
- value_high); \
+#define ATOMIC_FUNCTION(name) \
+ Node* CodeAssembler::Atomic##name( \
+ MachineType type, TNode<RawPtrT> base, TNode<UintPtrT> offset, \
+ Node* value, base::Optional<TNode<UintPtrT>> value_high) { \
+ Node* value_high_node = nullptr; \
+ if (value_high) value_high_node = *value_high; \
+ return raw_assembler()->Atomic##name(type, base, offset, value, \
+ value_high_node); \
}
-ATOMIC_FUNCTION(Exchange)
ATOMIC_FUNCTION(Add)
ATOMIC_FUNCTION(Sub)
ATOMIC_FUNCTION(And)
ATOMIC_FUNCTION(Or)
ATOMIC_FUNCTION(Xor)
+ATOMIC_FUNCTION(Exchange)
#undef ATOMIC_FUNCTION
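
The atomic helpers now take typed base/offset operands and an explicit base::Optional for the high word, which only matters when a 64-bit operation is split into a low/high pair on a 32-bit target. A hedged usage sketch from inside a CodeAssembler subclass; base, offset and value are placeholder locals, and only the signatures come from this patch:

// Sub-64-bit operation: no high word, so pass an empty optional instead of
// the nullptr default that the old Node*-based signature allowed.
Node* old_value = AtomicAdd(MachineType::Uint32(), base, offset, value, {});

// When a 64-bit operation is expressed as a low/high pair on a 32-bit
// target, the high 32 bits travel through the optional:
//   AtomicAdd(type, base, offset, value_low, value_high);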
Node* CodeAssembler::AtomicCompareExchange(MachineType type, Node* base,
@@ -835,10 +838,6 @@ Node* CodeAssembler::StoreRoot(RootIndex root_index, Node* value) {
value);
}
-Node* CodeAssembler::Retain(Node* value) {
- return raw_assembler()->Retain(value);
-}
-
Node* CodeAssembler::Projection(int index, Node* value) {
DCHECK_LT(index, value->op()->ValueOutputCount());
return raw_assembler()->Projection(index, value);
@@ -894,7 +893,7 @@ class NodeArray {
};
} // namespace
-TNode<Object> CodeAssembler::CallRuntimeImpl(
+Node* CodeAssembler::CallRuntimeImpl(
Runtime::FunctionId function, TNode<Object> context,
std::initializer_list<TNode<Object>> args) {
int result_size = Runtime::FunctionForId(function)->result_size;
@@ -924,7 +923,7 @@ TNode<Object> CodeAssembler::CallRuntimeImpl(
raw_assembler()->CallN(call_descriptor, inputs.size(), inputs.data());
HandleException(return_value);
CallEpilogue();
- return UncheckedCast<Object>(return_value);
+ return return_value;
}
void CodeAssembler::TailCallRuntimeImpl(
@@ -955,8 +954,7 @@ void CodeAssembler::TailCallRuntimeImpl(
Node* CodeAssembler::CallStubN(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor,
- size_t result_size, int input_count,
- Node* const* inputs) {
+ int input_count, Node* const* inputs) {
DCHECK(call_mode == StubCallMode::kCallCodeObject ||
call_mode == StubCallMode::kCallBuiltinPointer);
@@ -974,7 +972,6 @@ Node* CodeAssembler::CallStubN(StubCallMode call_mode,
// Extra arguments not mentioned in the descriptor are passed on the stack.
int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
- DCHECK_EQ(result_size, descriptor.GetReturnCount());
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), descriptor, stack_parameter_count, CallDescriptor::kNoFlags,
@@ -1010,8 +1007,7 @@ void CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor,
- size_t result_size, TNode<Object> target,
- TNode<Object> context,
+ TNode<Object> target, TNode<Object> context,
std::initializer_list<Node*> args) {
DCHECK(call_mode == StubCallMode::kCallCodeObject ||
call_mode == StubCallMode::kCallBuiltinPointer);
@@ -1026,8 +1022,7 @@ Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode,
inputs.Add(context);
}
- return CallStubN(call_mode, descriptor, result_size, inputs.size(),
- inputs.data());
+ return CallStubN(call_mode, descriptor, inputs.size(), inputs.data());
}
Node* CodeAssembler::CallJSStubImpl(const CallInterfaceDescriptor& descriptor,
@@ -1049,7 +1044,7 @@ Node* CodeAssembler::CallJSStubImpl(const CallInterfaceDescriptor& descriptor,
if (descriptor.HasContextParameter()) {
inputs.Add(context);
}
- return CallStubN(StubCallMode::kCallCodeObject, descriptor, 1, inputs.size(),
+ return CallStubN(StubCallMode::kCallCodeObject, descriptor, inputs.size(),
inputs.data());
}
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 203e1eea37..792ecd385c 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -8,9 +8,11 @@
#include <initializer_list>
#include <map>
#include <memory>
+#include <sstream>
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
+#include "include/cppgc/source-location.h"
#include "src/base/macros.h"
#include "src/base/type-traits.h"
#include "src/builtins/builtins.h"
@@ -376,10 +378,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
explicit CodeAssembler(CodeAssemblerState* state) : state_(state) {}
~CodeAssembler();
+ CodeAssembler(const CodeAssembler&) = delete;
+ CodeAssembler& operator=(const CodeAssembler&) = delete;
+
static Handle<Code> GenerateCode(CodeAssemblerState* state,
const AssemblerOptions& options,
const ProfileDataFromFile* profile_data);
-
bool Is64() const;
bool Is32() const;
bool IsFloat64RoundUpSupported() const;
@@ -566,7 +570,30 @@ class V8_EXPORT_PRIVATE CodeAssembler {
static constexpr int kTargetParameterIndex = -1;
- Node* Parameter(int value);
+ template <class T>
+ TNode<T> Parameter(
+ int value, cppgc::SourceLocation loc = cppgc::SourceLocation::Current()) {
+ static_assert(
+ std::is_convertible<TNode<T>, TNode<Object>>::value,
+ "Parameter is only for tagged types. Use UncheckedParameter instead.");
+ std::stringstream message;
+ message << "Parameter " << value;
+ if (loc.FileName()) {
+ message << " at " << loc.FileName() << ":" << loc.Line();
+ }
+ size_t buf_size = message.str().size() + 1;
+ char* message_dup = zone()->NewArray<char>(buf_size);
+ snprintf(message_dup, buf_size, "%s", message.str().c_str());
+
+ return Cast(UntypedParameter(value), message_dup);
+ }
+
+ template <class T>
+ TNode<T> UncheckedParameter(int value) {
+ return UncheckedCast<T>(UntypedParameter(value));
+ }
+
+ Node* UntypedParameter(int value);
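
Parameter() is now split into a checked, typed template (tagged types only, enforced by the static_assert above) and UncheckedParameter<T>(), both built on UntypedParameter(). A sketch of how a call site migrates, assuming a builtin-style body; the Descriptor:: indices are illustrative, not taken from this patch:

// Before: an untyped Node* that callers had to CAST themselves.
//   Node* receiver = Parameter(Descriptor::kReceiver);
// After: the cast is checked, and the failure message records the parameter
// index plus the caller's source location when available.
TNode<Object> receiver = Parameter<Object>(Descriptor::kReceiver);

// Untagged types fail the static_assert, so they use the unchecked variant.
TNode<IntPtrT> argc = UncheckedParameter<IntPtrT>(Descriptor::kArgumentsCount);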
TNode<Context> GetJSContextParameter();
void Return(TNode<Object> value);
@@ -758,31 +785,31 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* AtomicStore(MachineRepresentation rep, Node* base, Node* offset,
Node* value, Node* value_high = nullptr);
+ Node* AtomicAdd(MachineType type, TNode<RawPtrT> base, TNode<UintPtrT> offset,
+ Node* value, base::Optional<TNode<UintPtrT>> value_high);
+
+ Node* AtomicSub(MachineType type, TNode<RawPtrT> base, TNode<UintPtrT> offset,
+ Node* value, base::Optional<TNode<UintPtrT>> value_high);
+
+ Node* AtomicAnd(MachineType type, TNode<RawPtrT> base, TNode<UintPtrT> offset,
+ Node* value, base::Optional<TNode<UintPtrT>> value_high);
+
+ Node* AtomicOr(MachineType type, TNode<RawPtrT> base, TNode<UintPtrT> offset,
+ Node* value, base::Optional<TNode<UintPtrT>> value_high);
+
+ Node* AtomicXor(MachineType type, TNode<RawPtrT> base, TNode<UintPtrT> offset,
+ Node* value, base::Optional<TNode<UintPtrT>> value_high);
+
// Exchange value at raw memory location
- Node* AtomicExchange(MachineType type, Node* base, Node* offset, Node* value,
- Node* value_high = nullptr);
+ Node* AtomicExchange(MachineType type, TNode<RawPtrT> base,
+ TNode<UintPtrT> offset, Node* value,
+ base::Optional<TNode<UintPtrT>> value_high);
// Compare and Exchange value at raw memory location
Node* AtomicCompareExchange(MachineType type, Node* base, Node* offset,
Node* old_value, Node* new_value,
Node* old_value_high = nullptr,
Node* new_value_high = nullptr);
-
- Node* AtomicAdd(MachineType type, Node* base, Node* offset, Node* value,
- Node* value_high = nullptr);
-
- Node* AtomicSub(MachineType type, Node* base, Node* offset, Node* value,
- Node* value_high = nullptr);
-
- Node* AtomicAnd(MachineType type, Node* base, Node* offset, Node* value,
- Node* value_high = nullptr);
-
- Node* AtomicOr(MachineType type, Node* base, Node* offset, Node* value,
- Node* value_high = nullptr);
-
- Node* AtomicXor(MachineType type, Node* base, Node* offset, Node* value,
- Node* value_high = nullptr);
-
// Store a value to the root array.
Node* StoreRoot(RootIndex root_index, Node* value);
@@ -964,10 +991,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// kSetOverflowToMin.
TNode<Int32T> TruncateFloat32ToInt32(SloppyTNode<Float32T> value);
- // No-op that guarantees that the value is kept alive till this point even
- // if GC happens.
- Node* Retain(Node* value);
-
// Projections
Node* Projection(int index, Node* value);
@@ -980,11 +1003,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
}
// Calls
- template <class... TArgs>
- TNode<Object> CallRuntime(Runtime::FunctionId function, TNode<Object> context,
- TArgs... args) {
- return CallRuntimeImpl(function, context,
- {implicit_cast<TNode<Object>>(args)...});
+ template <class T = Object, class... TArgs>
+ TNode<T> CallRuntime(Runtime::FunctionId function, TNode<Object> context,
+ TArgs... args) {
+ return UncheckedCast<T>(CallRuntimeImpl(
+ function, context, {implicit_cast<TNode<Object>>(args)...}));
}
template <class... TArgs>
@@ -1018,27 +1041,15 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<T> CallStub(const CallInterfaceDescriptor& descriptor,
TNode<Code> target, TNode<Object> context, TArgs... args) {
return UncheckedCast<T>(CallStubR(StubCallMode::kCallCodeObject, descriptor,
- 1, target, context, args...));
+ target, context, args...));
}
- template <class... TArgs>
- Node* CallStubR(StubCallMode call_mode,
- const CallInterfaceDescriptor& descriptor, size_t result_size,
- TNode<Object> target, TNode<Object> context, TArgs... args) {
- return CallStubRImpl(call_mode, descriptor, result_size, target, context,
- {args...});
- }
-
- Node* CallStubN(StubCallMode call_mode,
- const CallInterfaceDescriptor& descriptor, size_t result_size,
- int input_count, Node* const* inputs);
-
template <class T = Object, class... TArgs>
TNode<T> CallBuiltinPointer(const CallInterfaceDescriptor& descriptor,
TNode<BuiltinPtr> target, TNode<Object> context,
TArgs... args) {
return UncheckedCast<T>(CallStubR(StubCallMode::kCallBuiltinPointer,
- descriptor, 1, target, context, args...));
+ descriptor, target, context, args...));
}
template <class... TArgs>
@@ -1185,9 +1196,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* function, MachineType return_type, SaveFPRegsMode mode,
std::initializer_list<CFunctionArg> args);
- TNode<Object> CallRuntimeImpl(Runtime::FunctionId function,
- TNode<Object> context,
- std::initializer_list<TNode<Object>> args);
+ Node* CallRuntimeImpl(Runtime::FunctionId function, TNode<Object> context,
+ std::initializer_list<TNode<Object>> args);
void TailCallRuntimeImpl(Runtime::FunctionId function, TNode<Int32T> arity,
TNode<Object> context,
@@ -1201,16 +1211,27 @@ class V8_EXPORT_PRIVATE CodeAssembler {
const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
std::initializer_list<Node*> args);
+ template <class... TArgs>
+ Node* CallStubR(StubCallMode call_mode,
+ const CallInterfaceDescriptor& descriptor,
+ TNode<Object> target, TNode<Object> context, TArgs... args) {
+ return CallStubRImpl(call_mode, descriptor, target, context, {args...});
+ }
+
Node* CallStubRImpl(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor,
- size_t result_size, TNode<Object> target,
- TNode<Object> context, std::initializer_list<Node*> args);
+ TNode<Object> target, TNode<Object> context,
+ std::initializer_list<Node*> args);
Node* CallJSStubImpl(const CallInterfaceDescriptor& descriptor,
TNode<Object> target, TNode<Object> context,
TNode<Object> function, TNode<Object> new_target,
TNode<Int32T> arity, std::initializer_list<Node*> args);
+ Node* CallStubN(StubCallMode call_mode,
+ const CallInterfaceDescriptor& descriptor, int input_count,
+ Node* const* inputs);
+
// These two don't have definitions and are here only for catching use cases
// where the cast is not necessary.
TNode<Int32T> Signed(TNode<Int32T> x);
@@ -1224,8 +1245,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void CallEpilogue();
CodeAssemblerState* state_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeAssembler);
};
// TODO(solanes, v8:6949): this class should be merged into
@@ -1233,6 +1252,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// CodeAssemblerVariableLists.
class V8_EXPORT_PRIVATE CodeAssemblerVariable {
public:
+ CodeAssemblerVariable(const CodeAssemblerVariable&) = delete;
+ CodeAssemblerVariable& operator=(const CodeAssemblerVariable&) = delete;
+
Node* value() const;
MachineRepresentation rep() const;
bool IsBound() const;
@@ -1264,7 +1286,6 @@ class V8_EXPORT_PRIVATE CodeAssemblerVariable {
};
Impl* impl_;
CodeAssemblerState* state_;
- DISALLOW_COPY_AND_ASSIGN(CodeAssemblerVariable);
};
std::ostream& operator<<(std::ostream&, const CodeAssemblerVariable&);
@@ -1331,6 +1352,11 @@ class V8_EXPORT_PRIVATE CodeAssemblerLabel {
: CodeAssemblerLabel(assembler, 1, &merged_variable, type) {}
~CodeAssemblerLabel();
+ // Cannot be copied because the destructor explicitly calls the destructor of
+ // the underlying {RawMachineLabel}, hence only one pointer can point to it.
+ CodeAssemblerLabel(const CodeAssemblerLabel&) = delete;
+ CodeAssemblerLabel& operator=(const CodeAssemblerLabel&) = delete;
+
inline bool is_bound() const { return bound_; }
inline bool is_used() const { return merge_count_ != 0; }
@@ -1358,10 +1384,6 @@ class V8_EXPORT_PRIVATE CodeAssemblerLabel {
std::map<CodeAssemblerVariable::Impl*, std::vector<Node*>,
CodeAssemblerVariable::ImplComparator>
variable_merges_;
-
- // Cannot be copied because the destructor explicitly call the destructor of
- // the underlying {RawMachineLabel}, hence only one pointer can point to it.
- DISALLOW_COPY_AND_ASSIGN(CodeAssemblerLabel);
};
class CodeAssemblerParameterizedLabelBase {
@@ -1442,6 +1464,9 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
~CodeAssemblerState();
+ CodeAssemblerState(const CodeAssemblerState&) = delete;
+ CodeAssemblerState& operator=(const CodeAssemblerState&) = delete;
+
const char* name() const { return name_; }
int parameter_count() const;
@@ -1485,8 +1510,6 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
std::vector<FileAndLine> macro_call_stack_;
VariableId NextVariableId() { return next_variable_id_++; }
-
- DISALLOW_COPY_AND_ASSIGN(CodeAssemblerState);
};
class V8_EXPORT_PRIVATE ScopedExceptionHandler {
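The CallRuntime change in the code-assembler.h hunks above gives the helper a defaulted result-type template parameter: existing call sites still receive TNode<Object>, while new call sites can request a more precise TNode<T>. A minimal stand-alone sketch of that pattern, using placeholder Object/Smi/TNode types rather than the real V8 classes:

struct Object {};
struct Smi : Object {};

template <class T>
struct TNode {};  // placeholder for compiler::TNode<T>

// Placeholder for the CallRuntimeImpl forwarding; returns a default value so
// that only the type flow is illustrated here.
template <class T = Object, class... TArgs>
TNode<T> CallRuntimeLike(TArgs... /*args*/) {
  return TNode<T>{};
}

int main() {
  TNode<Object> generic = CallRuntimeLike(1, 2);     // default result type
  TNode<Smi> specific = CallRuntimeLike<Smi>(1, 2);  // explicit result type
  (void)generic;
  (void)specific;
  return 0;
}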
diff --git a/deps/v8/src/compiler/common-node-cache.h b/deps/v8/src/compiler/common-node-cache.h
index b1a8370a7f..561f5e61f4 100644
--- a/deps/v8/src/compiler/common-node-cache.h
+++ b/deps/v8/src/compiler/common-node-cache.h
@@ -36,6 +36,9 @@ class CommonNodeCache final {
relocatable_int64_constants_(zone) {}
~CommonNodeCache() = default;
+ CommonNodeCache(const CommonNodeCache&) = delete;
+ CommonNodeCache& operator=(const CommonNodeCache&) = delete;
+
Node** FindInt32Constant(int32_t value) {
return int32_constants_.Find(value);
}
@@ -94,8 +97,6 @@ class CommonNodeCache final {
IntPtrNodeCache heap_constants_;
RelocInt32NodeCache relocatable_int32_constants_;
RelocInt64NodeCache relocatable_int64_constants_;
-
- DISALLOW_COPY_AND_ASSIGN(CommonNodeCache);
};
} // namespace compiler
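The CommonNodeCache hunk shows the mechanical pattern this commit applies across the compiler: the DISALLOW_COPY_AND_ASSIGN macro in the private section is replaced by explicitly deleted copy constructor and copy assignment declarations in the public section. A stand-alone sketch with an invented Widget class:

class Widget {
 public:
  Widget() = default;
  Widget(const Widget&) = delete;             // copying is disallowed
  Widget& operator=(const Widget&) = delete;  // assignment is disallowed

 private:
  int state_ = 0;
};

int main() {
  Widget a;
  // Widget b = a;  // would not compile: the copy constructor is deleted
  (void)a;
  return 0;
}

Deleting the special members in the public section makes the intent visible in the class interface and tends to produce clearer compiler diagnostics than the macro-based form.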
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index c04617c244..70f4bbf47b 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -20,18 +20,15 @@ namespace compiler {
namespace {
Decision DecideCondition(JSHeapBroker* broker, Node* const cond) {
- switch (cond->opcode()) {
- case IrOpcode::kFoldConstant: {
- return DecideCondition(broker, cond->InputAt(1));
- }
+ Node* unwrapped = SkipValueIdentities(cond);
+ switch (unwrapped->opcode()) {
case IrOpcode::kInt32Constant: {
- Int32Matcher mcond(cond);
- return mcond.Value() ? Decision::kTrue : Decision::kFalse;
+ Int32Matcher m(unwrapped);
+ return m.ResolvedValue() ? Decision::kTrue : Decision::kFalse;
}
case IrOpcode::kHeapConstant: {
- HeapObjectMatcher mcond(cond);
- return mcond.Ref(broker).BooleanValue() ? Decision::kTrue
- : Decision::kFalse;
+ HeapObjectMatcher m(unwrapped);
+ return m.Ref(broker).BooleanValue() ? Decision::kTrue : Decision::kFalse;
}
default:
return Decision::kUnknown;
@@ -436,7 +433,7 @@ Reduction CommonOperatorReducer::ReduceSwitch(Node* node) {
// non-matching cases as dead code (same for an unused IfDefault), because the
// Switch itself will be marked as dead code.
Int32Matcher mswitched(switched_value);
- if (mswitched.HasValue()) {
+ if (mswitched.HasResolvedValue()) {
bool matched = false;
size_t const projection_count = node->op()->ControlOutputCount();
@@ -447,7 +444,7 @@ Reduction CommonOperatorReducer::ReduceSwitch(Node* node) {
Node* if_value = projections[i];
DCHECK_EQ(IrOpcode::kIfValue, if_value->opcode());
const IfValueParameters& p = IfValueParametersOf(if_value->op());
- if (p.value() == mswitched.Value()) {
+ if (p.value() == mswitched.ResolvedValue()) {
matched = true;
Replace(if_value, control);
break;
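The DecideCondition hunk illustrates two renames that recur throughout this commit: the matcher accessors HasValue()/Value() become HasResolvedValue()/ResolvedValue(), and constant unwrapping goes through a shared SkipValueIdentities helper instead of special-casing kFoldConstant in each reducer. A schematic stand-alone version with placeholder types, not the real matchers:

#include <cstdint>

enum class Decision { kTrue, kFalse, kUnknown };

// Placeholder for Int32Matcher; only the new accessor names are mirrored.
struct FakeInt32Matcher {
  bool resolved;
  int32_t value;
  bool HasResolvedValue() const { return resolved; }
  int32_t ResolvedValue() const { return value; }
};

Decision DecideInt32(const FakeInt32Matcher& m) {
  if (!m.HasResolvedValue()) return Decision::kUnknown;  // value not known yet
  return m.ResolvedValue() ? Decision::kTrue : Decision::kFalse;
}

int main() {
  return DecideInt32(FakeInt32Matcher{true, 0}) == Decision::kFalse ? 0 : 1;
}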
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index a125113b6b..8a1bfca8c7 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -569,7 +569,7 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
V(TrapDivUnrepresentable) \
V(TrapRemByZero) \
V(TrapFloatUnrepresentable) \
- V(TrapFuncInvalid) \
+ V(TrapTableOutOfBounds) \
V(TrapFuncSigMismatch)
#define CACHED_PARAMETER_LIST(V) \
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index f68780394a..b6cede1cc5 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -459,6 +459,8 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
: public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit CommonOperatorBuilder(Zone* zone);
+ CommonOperatorBuilder(const CommonOperatorBuilder&) = delete;
+ CommonOperatorBuilder& operator=(const CommonOperatorBuilder&) = delete;
const Operator* Dead();
const Operator* DeadValue(MachineRepresentation rep);
@@ -563,8 +565,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const CommonOperatorGlobalCache& cache_;
Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(CommonOperatorBuilder);
};
// Node wrappers.
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index 263a5a5f1e..5fc6007114 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -170,8 +170,9 @@ class FieldRepresentationDependency final : public CompilationDependency {
bool IsValid() const override {
DisallowHeapAllocation no_heap_allocation;
Handle<Map> owner = owner_.object();
- return representation_.Equals(
- owner->instance_descriptors().GetDetails(descriptor_).representation());
+ return representation_.Equals(owner->instance_descriptors(kRelaxedLoad)
+ .GetDetails(descriptor_)
+ .representation());
}
void Install(const MaybeObjectHandle& code) const override {
@@ -208,7 +209,8 @@ class FieldTypeDependency final : public CompilationDependency {
DisallowHeapAllocation no_heap_allocation;
Handle<Map> owner = owner_.object();
Handle<Object> type = type_.object();
- return *type == owner->instance_descriptors().GetFieldType(descriptor_);
+ return *type ==
+ owner->instance_descriptors(kRelaxedLoad).GetFieldType(descriptor_);
}
void Install(const MaybeObjectHandle& code) const override {
@@ -236,7 +238,9 @@ class FieldConstnessDependency final : public CompilationDependency {
DisallowHeapAllocation no_heap_allocation;
Handle<Map> owner = owner_.object();
return PropertyConstness::kConst ==
- owner->instance_descriptors().GetDetails(descriptor_).constness();
+ owner->instance_descriptors(kRelaxedLoad)
+ .GetDetails(descriptor_)
+ .constness();
}
void Install(const MaybeObjectHandle& code) const override {
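In the compilation-dependencies hunks, instance_descriptors() now takes an explicit kRelaxedLoad tag, making it visible at the call site that the descriptor array is read without ordering guarantees. A rough stand-alone analogue of what a relaxed load communicates, using std::atomic purely for illustration; V8 uses its own tagged accessors here, not std::atomic:

#include <atomic>
#include <cstdio>

struct DescriptorArrayLike {
  int field_count;
};

std::atomic<DescriptorArrayLike*> descriptors{nullptr};

DescriptorArrayLike* LoadDescriptorsRelaxed() {
  // Relaxed ordering: the pointer read is atomic, but imposes no ordering on
  // surrounding accesses -- callers must not use it for synchronization,
  // which is exactly what the explicit tag documents.
  return descriptors.load(std::memory_order_relaxed);
}

int main() {
  static DescriptorArrayLike d{3};
  descriptors.store(&d, std::memory_order_relaxed);
  std::printf("%d\n", LoadDescriptorsRelaxed()->field_count);
  return 0;
}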
diff --git a/deps/v8/src/compiler/compiler-source-position-table.h b/deps/v8/src/compiler/compiler-source-position-table.h
index 6c3ab684a8..9974a2daad 100644
--- a/deps/v8/src/compiler/compiler-source-position-table.h
+++ b/deps/v8/src/compiler/compiler-source-position-table.h
@@ -30,6 +30,8 @@ class V8_EXPORT_PRIVATE SourcePositionTable final
Init(source_positions_->GetSourcePosition(node));
}
~Scope() { source_positions_->current_position_ = prev_position_; }
+ Scope(const Scope&) = delete;
+ Scope& operator=(const Scope&) = delete;
private:
void Init(SourcePosition position) {
@@ -38,10 +40,11 @@ class V8_EXPORT_PRIVATE SourcePositionTable final
SourcePositionTable* const source_positions_;
SourcePosition const prev_position_;
- DISALLOW_COPY_AND_ASSIGN(Scope);
};
explicit SourcePositionTable(Graph* graph);
+ SourcePositionTable(const SourcePositionTable&) = delete;
+ SourcePositionTable& operator=(const SourcePositionTable&) = delete;
void AddDecorator();
void RemoveDecorator();
@@ -63,8 +66,6 @@ class V8_EXPORT_PRIVATE SourcePositionTable final
Decorator* decorator_;
SourcePosition current_position_;
NodeAuxData<SourcePosition, SourcePosition::Unknown> table_;
-
- DISALLOW_COPY_AND_ASSIGN(SourcePositionTable);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/constant-folding-reducer.h b/deps/v8/src/compiler/constant-folding-reducer.h
index f98ab0595e..88f0cb6a57 100644
--- a/deps/v8/src/compiler/constant-folding-reducer.h
+++ b/deps/v8/src/compiler/constant-folding-reducer.h
@@ -20,6 +20,8 @@ class V8_EXPORT_PRIVATE ConstantFoldingReducer final
ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
JSHeapBroker* broker);
~ConstantFoldingReducer() final;
+ ConstantFoldingReducer(const ConstantFoldingReducer&) = delete;
+ ConstantFoldingReducer& operator=(const ConstantFoldingReducer&) = delete;
const char* reducer_name() const override { return "ConstantFoldingReducer"; }
@@ -31,8 +33,6 @@ class V8_EXPORT_PRIVATE ConstantFoldingReducer final
JSGraph* const jsgraph_;
JSHeapBroker* const broker_;
-
- DISALLOW_COPY_AND_ASSIGN(ConstantFoldingReducer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/control-flow-optimizer.cc b/deps/v8/src/compiler/control-flow-optimizer.cc
index 930976fbed..5bcee55285 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.cc
+++ b/deps/v8/src/compiler/control-flow-optimizer.cc
@@ -79,8 +79,8 @@ bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
if (cond->opcode() != IrOpcode::kWord32Equal) return false;
Int32BinopMatcher m(cond);
Node* index = m.left().node();
- if (!m.right().HasValue()) return false;
- int32_t value = m.right().Value();
+ if (!m.right().HasResolvedValue()) return false;
+ int32_t value = m.right().ResolvedValue();
ZoneSet<int32_t> values(zone());
values.insert(value);
@@ -104,8 +104,8 @@ bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
if (cond1->opcode() != IrOpcode::kWord32Equal) break;
Int32BinopMatcher m1(cond1);
if (m1.left().node() != index) break;
- if (!m1.right().HasValue()) break;
- int32_t value1 = m1.right().Value();
+ if (!m1.right().HasResolvedValue()) break;
+ int32_t value1 = m1.right().ResolvedValue();
if (values.find(value1) != values.end()) break;
DCHECK_NE(value, value1);
diff --git a/deps/v8/src/compiler/control-flow-optimizer.h b/deps/v8/src/compiler/control-flow-optimizer.h
index 07fc9e6fc2..060fed8274 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.h
+++ b/deps/v8/src/compiler/control-flow-optimizer.h
@@ -27,6 +27,8 @@ class V8_EXPORT_PRIVATE ControlFlowOptimizer final {
ControlFlowOptimizer(Graph* graph, CommonOperatorBuilder* common,
MachineOperatorBuilder* machine,
TickCounter* tick_counter, Zone* zone);
+ ControlFlowOptimizer(const ControlFlowOptimizer&) = delete;
+ ControlFlowOptimizer& operator=(const ControlFlowOptimizer&) = delete;
void Optimize();
@@ -50,8 +52,6 @@ class V8_EXPORT_PRIVATE ControlFlowOptimizer final {
NodeMarker<bool> queued_;
Zone* const zone_;
TickCounter* const tick_counter_;
-
- DISALLOW_COPY_AND_ASSIGN(ControlFlowOptimizer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc
index c29a472364..17250bba5e 100644
--- a/deps/v8/src/compiler/csa-load-elimination.cc
+++ b/deps/v8/src/compiler/csa-load-elimination.cc
@@ -94,13 +94,13 @@ bool OffsetMayAlias(Node* offset1, MachineRepresentation repr1, Node* offset2,
IntPtrMatcher matcher1(offset1);
IntPtrMatcher matcher2(offset2);
// If either of the offsets is variable, accesses may alias
- if (!matcher1.HasValue() || !matcher2.HasValue()) {
+ if (!matcher1.HasResolvedValue() || !matcher2.HasResolvedValue()) {
return true;
}
// Otherwise, we return whether accesses overlap
- intptr_t start1 = matcher1.Value();
+ intptr_t start1 = matcher1.ResolvedValue();
intptr_t end1 = start1 + ElementSizeInBytes(repr1);
- intptr_t start2 = matcher2.Value();
+ intptr_t start2 = matcher2.ResolvedValue();
intptr_t end2 = start2 + ElementSizeInBytes(repr2);
return !(end1 <= start2 || end2 <= start1);
}
diff --git a/deps/v8/src/compiler/csa-load-elimination.h b/deps/v8/src/compiler/csa-load-elimination.h
index 9460858d04..f738475a94 100644
--- a/deps/v8/src/compiler/csa-load-elimination.h
+++ b/deps/v8/src/compiler/csa-load-elimination.h
@@ -36,6 +36,8 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final
jsgraph_(jsgraph),
zone_(zone) {}
~CsaLoadElimination() final = default;
+ CsaLoadElimination(const CsaLoadElimination&) = delete;
+ CsaLoadElimination& operator=(const CsaLoadElimination&) = delete;
const char* reducer_name() const override { return "CsaLoadElimination"; }
@@ -107,8 +109,6 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final
NodeAuxData<AbstractState const*> node_states_;
JSGraph* const jsgraph_;
Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(CsaLoadElimination);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/dead-code-elimination.h b/deps/v8/src/compiler/dead-code-elimination.h
index 5f2ba329e2..7fb22838c7 100644
--- a/deps/v8/src/compiler/dead-code-elimination.h
+++ b/deps/v8/src/compiler/dead-code-elimination.h
@@ -42,6 +42,8 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
DeadCodeElimination(Editor* editor, Graph* graph,
CommonOperatorBuilder* common, Zone* temp_zone);
~DeadCodeElimination() final = default;
+ DeadCodeElimination(const DeadCodeElimination&) = delete;
+ DeadCodeElimination& operator=(const DeadCodeElimination&) = delete;
const char* reducer_name() const override { return "DeadCodeElimination"; }
@@ -76,8 +78,6 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
CommonOperatorBuilder* const common_;
Node* const dead_;
Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(DeadCodeElimination);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/decompression-optimizer.h b/deps/v8/src/compiler/decompression-optimizer.h
index 1d94739e45..330202d4c2 100644
--- a/deps/v8/src/compiler/decompression-optimizer.h
+++ b/deps/v8/src/compiler/decompression-optimizer.h
@@ -39,6 +39,8 @@ class V8_EXPORT_PRIVATE DecompressionOptimizer final {
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine);
~DecompressionOptimizer() = default;
+ DecompressionOptimizer(const DecompressionOptimizer&) = delete;
+ DecompressionOptimizer& operator=(const DecompressionOptimizer&) = delete;
// Assign States to the nodes, and then change the node's Operator to use the
// compressed version if possible.
@@ -114,8 +116,6 @@ class V8_EXPORT_PRIVATE DecompressionOptimizer final {
// themselves. In a way, it functions as a NodeSet since each node will be
// contained at most once. It's a Vector since we care about insertion speed.
NodeVector compressed_candidate_nodes_;
-
- DISALLOW_COPY_AND_ASSIGN(DecompressionOptimizer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 98ca00c78b..015f1cce6f 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -14,6 +14,7 @@
#include "src/compiler/feedback-source.h"
#include "src/compiler/graph-assembler.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-origin-table.h"
@@ -37,7 +38,8 @@ class EffectControlLinearizer {
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
MaskArrayIndexEnable mask_array_index,
- MaintainSchedule maintain_schedule)
+ MaintainSchedule maintain_schedule,
+ JSHeapBroker* broker)
: js_graph_(js_graph),
schedule_(schedule),
temp_zone_(temp_zone),
@@ -45,9 +47,11 @@ class EffectControlLinearizer {
maintain_schedule_(maintain_schedule),
source_positions_(source_positions),
node_origins_(node_origins),
+ broker_(broker),
graph_assembler_(js_graph, temp_zone, base::nullopt,
should_maintain_schedule() ? schedule : nullptr),
- frame_state_zapper_(nullptr) {}
+ frame_state_zapper_(nullptr),
+ fast_api_call_stack_slot_(nullptr) {}
void Run();
@@ -284,17 +288,11 @@ class EffectControlLinearizer {
DeoptimizeReason reason);
// Helper functions used in LowerDynamicCheckMaps
- void CheckPolymorphic(Node* expected_polymorphic_array, Node* actual_map,
- Node* actual_handler, GraphAssemblerLabel<0>* done,
- Node* frame_state);
- void ProcessMonomorphic(Node* handler, GraphAssemblerLabel<0>* done,
- Node* frame_state, int slot, Node* vector);
- void BranchOnICState(int slot_index, Node* vector, Node* value_map,
- Node* frame_state, GraphAssemblerLabel<0>* monomorphic,
- GraphAssemblerLabel<0>* maybe_poly,
- GraphAssemblerLabel<0>* migrate, Node** strong_feedback,
- Node** poly_array);
-
+ void BuildCallDynamicMapChecksBuiltin(Node* actual_value,
+ Node* actual_handler,
+ int feedback_slot_index,
+ GraphAssemblerLabel<0>* done,
+ Node* frame_state);
bool should_maintain_schedule() const {
return maintain_schedule_ == MaintainSchedule::kMaintain;
}
@@ -311,6 +309,7 @@ class EffectControlLinearizer {
}
MachineOperatorBuilder* machine() const { return js_graph_->machine(); }
JSGraphAssembler* gasm() { return &graph_assembler_; }
+ JSHeapBroker* broker() const { return broker_; }
JSGraph* js_graph_;
Schedule* schedule_;
@@ -320,8 +319,11 @@ class EffectControlLinearizer {
RegionObservability region_observability_ = RegionObservability::kObservable;
SourcePositionTable* source_positions_;
NodeOriginTable* node_origins_;
+ JSHeapBroker* broker_;
JSGraphAssembler graph_assembler_;
Node* frame_state_zapper_; // For tracking down compiler::Node::New crashes.
+ Node* fast_api_call_stack_slot_; // For caching the stack slot allocated for
+ // fast API calls.
};
namespace {
@@ -1887,230 +1889,65 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
}
}
-void EffectControlLinearizer::CheckPolymorphic(Node* expected_polymorphic_array,
- Node* actual_map,
- Node* actual_handler,
- GraphAssemblerLabel<0>* done,
- Node* frame_state) {
- Node* expected_polymorphic_array_map =
- __ LoadField(AccessBuilder::ForMap(), expected_polymorphic_array);
- Node* is_weak_fixed_array = __ TaggedEqual(expected_polymorphic_array_map,
- __ WeakFixedArrayMapConstant());
- __ DeoptimizeIfNot(DeoptimizeReason::kTransitionedToMegamorphicIC,
- FeedbackSource(), is_weak_fixed_array, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
-
- Node* polymorphic_array = expected_polymorphic_array;
-
- // This is now a weak pointer that we're holding in the register, we
- // need to be careful about spilling and reloading it (as it could
- // get cleared in between). There's no runtime call here that could
- // cause a spill so we should be safe.
- Node* weak_actual_map = MakeWeakForComparison(actual_map);
- Node* length = ChangeSmiToInt32(__ LoadField(
- AccessBuilder::ForWeakFixedArrayLength(), polymorphic_array));
- auto do_handler_check = __ MakeLabel(MachineRepresentation::kWord32);
-
- GraphAssemblerLabel<0> labels[] = {__ MakeLabel(), __ MakeLabel(),
- __ MakeLabel(), __ MakeLabel()};
-
- STATIC_ASSERT(FLAG_max_minimorphic_map_checks == arraysize(labels));
- DCHECK_GE(FLAG_max_minimorphic_map_checks,
- FLAG_max_valid_polymorphic_map_count);
-
- // The following generates a switch based on the length of the
- // array:
- //
- // if length >= 4: goto labels[3]
- // if length == 3: goto labels[2]
- // if length == 2: goto labels[1]
- // if length == 1: goto labels[0]
- __ GotoIf(__ Int32LessThanOrEqual(
- __ Int32Constant(FeedbackIterator::SizeFor(4)), length),
- &labels[3]);
- __ GotoIf(
- __ Word32Equal(length, __ Int32Constant(FeedbackIterator::SizeFor(3))),
- &labels[2]);
- __ GotoIf(
- __ Word32Equal(length, __ Int32Constant(FeedbackIterator::SizeFor(2))),
- &labels[1]);
- __ GotoIf(
- __ Word32Equal(length, __ Int32Constant(FeedbackIterator::SizeFor(1))),
- &labels[0]);
-
- // We should never have an polymorphic feedback array of size 0.
+void EffectControlLinearizer::BuildCallDynamicMapChecksBuiltin(
+ Node* actual_value, Node* actual_handler, int feedback_slot_index,
+ GraphAssemblerLabel<0>* done, Node* frame_state) {
+ Node* slot_index = __ IntPtrConstant(feedback_slot_index);
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ auto builtin = Builtins::kDynamicMapChecks;
+ Node* result = CallBuiltin(builtin, properties, slot_index, actual_value,
+ actual_handler);
+ __ GotoIf(__ WordEqual(result, __ IntPtrConstant(static_cast<int>(
+ DynamicMapChecksStatus::kSuccess))),
+ done);
+ __ DeoptimizeIf(DeoptimizeKind::kBailout, DeoptimizeReason::kMissingMap,
+ FeedbackSource(),
+ __ WordEqual(result, __ IntPtrConstant(static_cast<int>(
+ DynamicMapChecksStatus::kBailout))),
+ frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ __ DeoptimizeIf(DeoptimizeReason::kWrongHandler, FeedbackSource(),
+ __ WordEqual(result, __ IntPtrConstant(static_cast<int>(
+ DynamicMapChecksStatus::kDeopt))),
+ frame_state, IsSafetyCheck::kCriticalSafetyCheck);
__ Unreachable(done);
-
- // This loop generates code like this to do the dynamic map check:
- //
- // labels[3]:
- // maybe_map = load(polymorphic_array, i)
- // if weak_actual_map == maybe_map goto handler_check
- // goto labels[2]
- // labels[2]:
- // maybe_map = load(polymorphic_array, i - 1)
- // if weak_actual_map == maybe_map goto handler_check
- // goto labels[1]
- // labels[1]:
- // maybe_map = load(polymorphic_array, i - 2)
- // if weak_actual_map == maybe_map goto handler_check
- // goto labels[0]
- // labels[0]:
- // maybe_map = load(polymorphic_array, i - 3)
- // if weak_actual_map == maybe_map goto handler_check
- // bailout
- for (int i = arraysize(labels) - 1; i >= 0; i--) {
- __ Bind(&labels[i]);
- Node* maybe_map = __ LoadField(AccessBuilder::ForWeakFixedArraySlot(
- FeedbackIterator::MapIndexForEntry(i)),
- polymorphic_array);
- Node* map_check = __ TaggedEqual(maybe_map, weak_actual_map);
-
- int handler_index = FeedbackIterator::HandlerIndexForEntry(i);
- __ GotoIf(map_check, &do_handler_check, __ Int32Constant(handler_index));
- if (i > 0) {
- __ Goto(&labels[i - 1]);
- } else {
- // TODO(turbofan): Add support for gasm->Deoptimize.
- __ DeoptimizeIf(DeoptimizeKind::kBailout, DeoptimizeReason::kMissingMap,
- FeedbackSource(), __ IntPtrConstant(1),
- FrameState(frame_state));
- __ Unreachable(done);
- }
- }
-
- __ Bind(&do_handler_check);
- Node* handler_index = do_handler_check.PhiAt(0);
- Node* maybe_handler =
- __ LoadElement(AccessBuilder::ForWeakFixedArrayElement(),
- polymorphic_array, handler_index);
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongHandler, FeedbackSource(),
- __ TaggedEqual(maybe_handler, actual_handler), frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
- __ Goto(done);
-}
-
-void EffectControlLinearizer::ProcessMonomorphic(Node* handler,
- GraphAssemblerLabel<0>* done,
- Node* frame_state, int slot,
- Node* vector) {
- Node* feedback_slot_handler =
- __ LoadField(AccessBuilder::ForFeedbackVectorSlot(slot + 1), vector);
- Node* handler_check = __ TaggedEqual(handler, feedback_slot_handler);
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongHandler, FeedbackSource(),
- handler_check, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
- __ Goto(done);
-}
-
-void EffectControlLinearizer::BranchOnICState(
- int slot_index, Node* vector, Node* value_map, Node* frame_state,
- GraphAssemblerLabel<0>* monomorphic, GraphAssemblerLabel<0>* maybe_poly,
- GraphAssemblerLabel<0>* migrate, Node** strong_feedback,
- Node** poly_array) {
- Node* feedback =
- __ LoadField(AccessBuilder::ForFeedbackVectorSlot(slot_index), vector);
-
- Node* mono_check = BuildIsWeakReferenceTo(feedback, value_map);
- __ GotoIf(mono_check, monomorphic);
-
- Node* is_strong_ref = BuildIsStrongReference(feedback);
- if (migrate != nullptr) {
- auto check_poly = __ MakeLabel();
-
- __ GotoIf(is_strong_ref, &check_poly);
- Node* is_cleared = BuildIsClearedWeakReference(feedback);
- __ DeoptimizeIf(DeoptimizeKind::kBailout, DeoptimizeReason::kMissingMap,
- FeedbackSource(), is_cleared, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
- *strong_feedback = BuildStrongReferenceFromWeakReference(feedback);
- __ Goto(migrate);
-
- __ Bind(&check_poly);
- } else {
- __ DeoptimizeIfNot(DeoptimizeKind::kBailout, DeoptimizeReason::kMissingMap,
- FeedbackSource(), is_strong_ref, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
- }
-
- *poly_array = feedback;
- __ Goto(maybe_poly);
}
void EffectControlLinearizer::LowerDynamicCheckMaps(Node* node,
Node* frame_state) {
DynamicCheckMapsParameters const& p =
DynamicCheckMapsParametersOf(node->op());
- Node* value = node->InputAt(0);
+ Node* actual_value = node->InputAt(0);
FeedbackSource const& feedback = p.feedback();
- Node* vector = __ HeapConstant(feedback.vector);
- Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* handler = p.handler()->IsSmi()
- ? __ SmiConstant(Smi::ToInt(*p.handler()))
- : __ HeapConstant(Handle<HeapObject>::cast(p.handler()));
+ Node* actual_value_map = __ LoadField(AccessBuilder::ForMap(), actual_value);
+ Node* actual_handler =
+ p.handler()->IsSmi()
+ ? __ SmiConstant(Smi::ToInt(*p.handler()))
+ : __ HeapConstant(Handle<HeapObject>::cast(p.handler()));
auto done = __ MakeLabel();
+ auto call_builtin = __ MakeDeferredLabel();
- // Emit monomorphic checks only if current state is monomorphic. In
- // case the current state is polymorphic, and if we ever go back to
- // monomorphic start, we will deopt and reoptimize the code.
- if (p.state() == DynamicCheckMapsParameters::kMonomorphic) {
- auto monomorphic_map_match = __ MakeLabel();
- auto maybe_poly = __ MakeLabel();
- Node* strong_feedback;
- Node* poly_array;
-
- if (p.flags() & CheckMapsFlag::kTryMigrateInstance) {
- auto map_check_failed = __ MakeDeferredLabel();
- BranchOnICState(feedback.index(), vector, value_map, frame_state,
- &monomorphic_map_match, &maybe_poly, &map_check_failed,
- &strong_feedback, &poly_array);
-
- __ Bind(&map_check_failed);
- {
- MigrateInstanceOrDeopt(value, value_map, frame_state, FeedbackSource(),
- DeoptimizeReason::kMissingMap);
-
- // Check if new map matches.
- Node* new_value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* mono_check = __ TaggedEqual(strong_feedback, new_value_map);
- __ DeoptimizeIfNot(DeoptimizeKind::kBailout,
- DeoptimizeReason::kMissingMap, FeedbackSource(),
- mono_check, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
- ProcessMonomorphic(handler, &done, frame_state, feedback.index(),
- vector);
- }
+ ZoneHandleSet<Map> maps = p.maps();
+ size_t const map_count = maps.size();
+ for (size_t i = 0; i < map_count; ++i) {
+ Node* map = __ HeapConstant(maps[i]);
+ Node* check = __ TaggedEqual(actual_value_map, map);
+ if (i == map_count - 1) {
+ __ BranchWithCriticalSafetyCheck(check, &done, &call_builtin);
} else {
- BranchOnICState(feedback.index(), vector, value_map, frame_state,
- &monomorphic_map_match, &maybe_poly, nullptr,
- &strong_feedback, &poly_array);
+ auto next_map = __ MakeLabel();
+ __ BranchWithCriticalSafetyCheck(check, &done, &next_map);
+ __ Bind(&next_map);
}
+ }
- __ Bind(&monomorphic_map_match);
- ProcessMonomorphic(handler, &done, frame_state, feedback.index(), vector);
-
- __ Bind(&maybe_poly);
- // TODO(mythria): ICs don't drop deprecated maps from feedback vector.
- // So it is not equired to migrate the instance for polymorphic case.
- // When we change dynamic map checks to check only four maps re-evaluate
- // if this is required.
- CheckPolymorphic(poly_array, value_map, handler, &done, frame_state);
- } else {
- DCHECK_EQ(p.state(), DynamicCheckMapsParameters::kPolymorphic);
- Node* feedback_slot = __ LoadField(
- AccessBuilder::ForFeedbackVectorSlot(feedback.index()), vector);
- // If the IC state at code generation time is not monomorphic, we don't
- // handle monomorphic states and just deoptimize if IC transitions to
- // monomorphic. For polymorphic ICs it is not required to migrate deprecated
- // maps since ICs don't discard deprecated maps from feedback.
- Node* is_poly_or_megamorphic = BuildIsStrongReference(feedback_slot);
- __ DeoptimizeIfNot(DeoptimizeReason::kTransitionedToMonomorphicIC,
- FeedbackSource(), is_poly_or_megamorphic, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
- CheckPolymorphic(feedback_slot, value_map, handler, &done, frame_state);
+ __ Bind(&call_builtin);
+ {
+ BuildCallDynamicMapChecksBuiltin(actual_value, actual_handler,
+ feedback.index(), &done, frame_state);
}
+
__ Bind(&done);
}
@@ -2310,7 +2147,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
// are all zero, and if so we know that we can perform a division
// safely (and fast by doing an arithmetic - aka sign preserving -
// right shift on {lhs}).
- int32_t divisor = m.Value();
+ int32_t divisor = m.ResolvedValue();
Node* mask = __ Int32Constant(divisor - 1);
Node* shift = __ Int32Constant(base::bits::WhichPowerOfTwo(divisor));
Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero);
@@ -2532,7 +2369,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
// are all zero, and if so we know that we can perform a division
// safely (and fast by doing a logical - aka zero extending - right
// shift on {lhs}).
- uint32_t divisor = m.Value();
+ uint32_t divisor = m.ResolvedValue();
Node* mask = __ Uint32Constant(divisor - 1);
Node* shift = __ Uint32Constant(base::bits::WhichPowerOfTwo(divisor));
Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero);
@@ -3742,36 +3579,26 @@ void EffectControlLinearizer::LowerTierUpCheck(Node* node) {
TierUpCheckNode n(node);
TNode<FeedbackVector> vector = n.feedback_vector();
- Node* optimization_marker = __ LoadField(
- AccessBuilder::ForFeedbackVectorOptimizedCodeWeakOrSmi(), vector);
+ Node* optimization_state =
+ __ LoadField(AccessBuilder::ForFeedbackVectorFlags(), vector);
// TODO(jgruber): The branch introduces a sequence of spills before the
// branch (and restores at `fallthrough`) that are completely unnecessary
// since the IfFalse continuation ends in a tail call. Investigate how to
// avoid these and fix it.
- // TODO(jgruber): Combine the checks below for none/queued, e.g. by
- // reorganizing OptimizationMarker values such that the least significant bit
- // says whether the value is interesting or not. Also update the related
- // check in the InterpreterEntryTrampoline.
-
auto fallthrough = __ MakeLabel();
- auto optimization_marker_is_not_none = __ MakeDeferredLabel();
- auto optimization_marker_is_neither_none_nor_queued = __ MakeDeferredLabel();
- __ BranchWithHint(
- __ TaggedEqual(optimization_marker, __ SmiConstant(static_cast<int>(
- OptimizationMarker::kNone))),
- &fallthrough, &optimization_marker_is_not_none, BranchHint::kTrue);
-
- __ Bind(&optimization_marker_is_not_none);
+ auto has_optimized_code_or_marker = __ MakeDeferredLabel();
__ BranchWithHint(
- __ TaggedEqual(optimization_marker,
- __ SmiConstant(static_cast<int>(
- OptimizationMarker::kInOptimizationQueue))),
- &fallthrough, &optimization_marker_is_neither_none_nor_queued,
- BranchHint::kNone);
+ __ Word32Equal(
+ __ Word32And(optimization_state,
+ __ Uint32Constant(
+ FeedbackVector::
+ kHasNoTopTierCodeOrCompileOptimizedMarkerMask)),
+ __ Int32Constant(0)),
+ &fallthrough, &has_optimized_code_or_marker, BranchHint::kTrue);
- __ Bind(&optimization_marker_is_neither_none_nor_queued);
+ __ Bind(&has_optimized_code_or_marker);
// The optimization marker field contains a non-trivial value, and some
// action has to be taken. For example, perhaps tier-up has been requested
@@ -3781,17 +3608,8 @@ void EffectControlLinearizer::LowerTierUpCheck(Node* node) {
// Currently we delegate these tasks to the InterpreterEntryTrampoline.
// TODO(jgruber,v8:8888): Consider a dedicated builtin instead.
- const int parameter_count =
- StartNode{graph()->start()}.FormalParameterCount();
TNode<HeapObject> code =
__ HeapConstant(BUILTIN_CODE(isolate(), InterpreterEntryTrampoline));
- Node* target = __ Parameter(Linkage::kJSCallClosureParamIndex);
- Node* new_target =
- __ Parameter(Linkage::GetJSCallNewTargetParamIndex(parameter_count));
- Node* argc =
- __ Parameter(Linkage::GetJSCallArgCountParamIndex(parameter_count));
- Node* context =
- __ Parameter(Linkage::GetJSCallContextParamIndex(parameter_count));
JSTrampolineDescriptor descriptor;
CallDescriptor::Flags flags = CallDescriptor::kFixedTargetRegister |
@@ -3799,8 +3617,8 @@ void EffectControlLinearizer::LowerTierUpCheck(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), descriptor, descriptor.GetStackParameterCount(), flags,
Operator::kNoProperties);
- Node* nodes[] = {code, target, new_target, argc,
- context, __ effect(), __ control()};
+ Node* nodes[] = {code, n.target(), n.new_target(), n.input_count(),
+ n.context(), __ effect(), __ control()};
#ifdef DEBUG
static constexpr int kCodeContextEffectControl = 4;
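The LowerTierUpCheck change above replaces two sequential Smi comparisons against OptimizationMarker values with a single mask test on the feedback vector's flags word. A tiny illustration of that shape; the bit layout below is invented, the real mask being FeedbackVector::kHasNoTopTierCodeOrCompileOptimizedMarkerMask:

#include <cstdint>
#include <cstdio>

constexpr uint32_t kTierUpRelevantBitsMask = 0b110;  // placeholder layout

bool HasOptimizedCodeOrMarker(uint32_t optimization_state) {
  // One AND plus one compare replaces a chain of equality checks.
  return (optimization_state & kTierUpRelevantBitsMask) != 0;
}

int main() {
  std::printf("%d %d\n", HasOptimizedCodeOrMarker(0b000),
              HasOptimizedCodeOrMarker(0b010));  // prints "0 1"
  return 0;
}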
@@ -5235,13 +5053,21 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
CHECK_EQ(FastApiCallNode::ArityForArgc(c_arg_count, js_arg_count),
value_input_count);
- // Add the { has_error } output parameter.
- int kAlign = 4;
- int kSize = 4;
- Node* has_error = __ StackSlot(kSize, kAlign);
- // Generate the store to `has_error`.
+ if (fast_api_call_stack_slot_ == nullptr) {
+ // Add the { fallback } output parameter.
+ int kAlign = 4;
+ int kSize = sizeof(v8::FastApiCallbackOptions);
+ // If this check fails, probably you've added new fields to
+ // v8::FastApiCallbackOptions, which means you'll need to write code
+ // that initializes and reads from them too (see the Store and Load to
+ // fast_api_call_stack_slot_ below).
+ CHECK_EQ(kSize, 1);
+ fast_api_call_stack_slot_ = __ StackSlot(kSize, kAlign);
+ }
+
+ // Generate the store to `fast_api_call_stack_slot_`.
__ Store(StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
- has_error, 0, jsgraph()->ZeroConstant());
+ fast_api_call_stack_slot_, 0, jsgraph()->ZeroConstant());
MachineSignature::Builder builder(
graph()->zone(), 1, c_arg_count + FastApiCallNode::kHasErrorInputCount);
@@ -5252,7 +5078,7 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
MachineTypeFor(c_signature->ArgumentInfo(i).GetType());
builder.AddParam(machine_type);
}
- builder.AddParam(MachineType::Pointer()); // has_error
+ builder.AddParam(MachineType::Pointer()); // fast_api_call_stack_slot_
CallDescriptor* call_descriptor =
Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
@@ -5261,19 +5087,26 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
Node** const inputs = graph()->zone()->NewArray<Node*>(
c_arg_count + FastApiCallNode::kFastCallExtraInputCount);
- for (int i = 0; i < c_arg_count + FastApiCallNode::kFastTargetInputCount;
- ++i) {
- inputs[i] = NodeProperties::GetValueInput(node, i);
+ inputs[0] = NodeProperties::GetValueInput(node, 0); // the target
+ for (int i = FastApiCallNode::kFastTargetInputCount;
+ i < c_arg_count + FastApiCallNode::kFastTargetInputCount; ++i) {
+ if (c_signature->ArgumentInfo(i - 1).GetType() ==
+ CTypeInfo::Type::kFloat32) {
+ inputs[i] =
+ __ TruncateFloat64ToFloat32(NodeProperties::GetValueInput(node, i));
+ } else {
+ inputs[i] = NodeProperties::GetValueInput(node, i);
+ }
}
- inputs[c_arg_count + 1] = has_error;
+ inputs[c_arg_count + 1] = fast_api_call_stack_slot_;
inputs[c_arg_count + 2] = __ effect();
inputs[c_arg_count + 3] = __ control();
__ Call(call_descriptor,
c_arg_count + FastApiCallNode::kFastCallExtraInputCount, inputs);
- // Generate the load from `has_error`.
- Node* load = __ Load(MachineType::Int32(), has_error, 0);
+ // Generate the load from `fast_api_call_stack_slot_`.
+ Node* load = __ Load(MachineType::Int32(), fast_api_call_stack_slot_, 0);
TNode<Boolean> cond =
TNode<Boolean>::UncheckedCast(__ Word32Equal(load, __ Int32Constant(0)));
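The LowerFastApiCall hunks replace the per-call has_error stack slot with a single cached fast_api_call_stack_slot_, sized to v8::FastApiCallbackOptions, allocated lazily when the first fast call is lowered and reused for every later one in the same function. A minimal sketch of that lazy-caching pattern with an invented Lowerer class:

#include <cstdio>

struct Lowerer {
  int* fast_api_call_stack_slot_ = nullptr;

  int* GetOrCreateSlot() {
    if (fast_api_call_stack_slot_ == nullptr) {
      static int slot = 0;  // stands in for the __ StackSlot(kSize, kAlign) node
      fast_api_call_stack_slot_ = &slot;
    }
    return fast_api_call_stack_slot_;
  }
};

int main() {
  Lowerer l;
  std::printf("%d\n", l.GetOrCreateSlot() == l.GetOrCreateSlot());  // prints 1
  return 0;
}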
@@ -6527,9 +6360,9 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
auto if_match = __ MakeLabel();
auto if_notmatch = __ MakeLabel();
auto if_notsmi = __ MakeDeferredLabel();
- __ GotoIfNot(ObjectIsSmi(candidate_key), &if_notsmi);
- __ Branch(__ Word32Equal(ChangeSmiToInt32(candidate_key), key), &if_match,
- &if_notmatch);
+ __ GotoIfNot(ObjectIsSmi(candidate_key), &if_notsmi);
+ __ Branch(__ Word32Equal(ChangeSmiToInt32(candidate_key), key), &if_match,
+ &if_notmatch);
__ Bind(&if_notsmi);
__ GotoIfNot(
@@ -6627,10 +6460,11 @@ void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
MaskArrayIndexEnable mask_array_index,
- MaintainSchedule maintain_schedule) {
- EffectControlLinearizer linearizer(graph, schedule, temp_zone,
- source_positions, node_origins,
- mask_array_index, maintain_schedule);
+ MaintainSchedule maintain_schedule,
+ JSHeapBroker* broker) {
+ EffectControlLinearizer linearizer(
+ graph, schedule, temp_zone, source_positions, node_origins,
+ mask_array_index, maintain_schedule, broker);
linearizer.Run();
}
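Taken together, the effect-control-linearizer changes swap the hand-rolled monomorphic/polymorphic feedback walk in LowerDynamicCheckMaps for inline comparisons against the statically recorded maps, with a deferred call to the Builtins::kDynamicMapChecks builtin when none of them match. A schematic stand-alone version of that control flow, with placeholder types rather than graph nodes:

#include <array>
#include <cstdio>

using Map = const void*;

enum class Status { kSuccess, kBailout, kDeopt };

// Placeholder for the Builtins::kDynamicMapChecks call on the slow path.
Status CallDynamicMapChecksBuiltin(Map /*actual_map*/) { return Status::kBailout; }

template <size_t N>
Status DynamicCheckMaps(Map actual_map, const std::array<Map, N>& known_maps) {
  for (Map m : known_maps) {
    if (m == actual_map) return Status::kSuccess;  // fast path: inline check
  }
  return CallDynamicMapChecksBuiltin(actual_map);  // deferred slow path
}

int main() {
  int a = 0, b = 0;
  const std::array<Map, 2> maps{&a, &b};
  std::printf("%d\n", static_cast<int>(DynamicCheckMaps(&a, maps)));  // 0
  return 0;
}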
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index d747da1676..fbfd3046dc 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -21,6 +21,7 @@ class JSGraph;
class NodeOriginTable;
class Schedule;
class SourcePositionTable;
+class JSHeapBroker;
enum class MaskArrayIndexEnable { kDoNotMaskArrayIndex, kMaskArrayIndex };
@@ -29,7 +30,8 @@ enum class MaintainSchedule { kMaintain, kDiscard };
V8_EXPORT_PRIVATE void LinearizeEffectControl(
JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- MaskArrayIndexEnable mask_array_index, MaintainSchedule maintain_schedule);
+ MaskArrayIndexEnable mask_array_index, MaintainSchedule maintain_schedule,
+ JSHeapBroker* broker);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 89a8d4e118..f4ab1c9709 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/all-nodes.h"
+#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/execution/frame-constants.h"
@@ -68,17 +69,6 @@ Reduction EscapeAnalysisReducer::ReplaceNode(Node* original,
return NoChange();
}
-namespace {
-
-Node* SkipTypeGuards(Node* node) {
- while (node->opcode() == IrOpcode::kTypeGuard) {
- node = NodeProperties::GetValueInput(node, 0);
- }
- return node;
-}
-
-} // namespace
-
Node* EscapeAnalysisReducer::ObjectIdNode(const VirtualObject* vobject) {
VirtualObject::Id id = vobject->id();
if (id >= object_id_cache_.size()) object_id_cache_.resize(id + 1);
@@ -185,8 +175,8 @@ Node* EscapeAnalysisReducer::ReduceDeoptState(Node* node, Node* effect,
i);
}
return new_node.Get();
- } else if (const VirtualObject* vobject =
- analysis_result().GetVirtualObject(SkipTypeGuards(node))) {
+ } else if (const VirtualObject* vobject = analysis_result().GetVirtualObject(
+ SkipValueIdentities(node))) {
if (vobject->HasEscaped()) return node;
if (deduplicator->SeenBefore(vobject)) {
return ObjectIdNode(vobject);
@@ -315,7 +305,6 @@ void EscapeAnalysisReducer::Finalize() {
formal_parameter_count,
Type::Constant(params.formal_parameter_count(),
jsgraph()->graph()->zone()));
-#ifdef V8_REVERSE_JSARGS
Node* offset_to_first_elem = jsgraph()->Constant(
CommonFrameConstants::kFixedSlotCountAboveFp);
if (!NodeProperties::IsTyped(offset_to_first_elem)) {
@@ -337,22 +326,6 @@ void EscapeAnalysisReducer::Finalize() {
jsgraph()->simplified()->NumberAdd(), offset,
formal_parameter_count);
}
-#else
- // {offset} is a reverted index starting from 1. The base address is
- // adapted to allow offsets starting from 1.
- Node* offset = jsgraph()->graph()->NewNode(
- jsgraph()->simplified()->NumberSubtract(), arguments_length,
- index);
- if (type == CreateArgumentsType::kRestParameter) {
- // In the case of rest parameters we should skip the formal
- // parameters.
- NodeProperties::SetType(offset,
- TypeCache::Get()->kArgumentsLengthType);
- offset = jsgraph()->graph()->NewNode(
- jsgraph()->simplified()->NumberSubtract(), offset,
- formal_parameter_count);
- }
-#endif
NodeProperties::SetType(offset,
TypeCache::Get()->kArgumentsLengthType);
NodeProperties::ReplaceValueInput(load, arguments_frame, 0);
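The escape-analysis-reducer hunk drops its local SkipTypeGuards helper in favour of the shared SkipValueIdentities made available through the new node-matchers include, which walks through value-identity wrapper nodes until it reaches the underlying value. A stand-alone sketch of that walk, using placeholder node and opcode types:

#include <cstdio>

enum class Opcode { kTypeGuard, kFoldConstant, kInt32Constant };

struct NodeLike {
  Opcode opcode;
  NodeLike* value_input;  // nullptr for leaf nodes
};

NodeLike* SkipValueIdentitiesLike(NodeLike* node) {
  // Keep unwrapping identity-preserving wrappers until a real value remains.
  while (node->opcode == Opcode::kTypeGuard ||
         node->opcode == Opcode::kFoldConstant) {
    node = node->value_input;
  }
  return node;
}

int main() {
  NodeLike constant{Opcode::kInt32Constant, nullptr};
  NodeLike guard{Opcode::kTypeGuard, &constant};
  std::printf("%d\n", SkipValueIdentitiesLike(&guard) == &constant);  // prints 1
  return 0;
}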
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index 1c1267b3c7..49b672a26b 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -85,6 +85,8 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
public:
EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
EscapeAnalysisResult analysis_result, Zone* zone);
+ EscapeAnalysisReducer(const EscapeAnalysisReducer&) = delete;
+ EscapeAnalysisReducer& operator=(const EscapeAnalysisReducer&) = delete;
Reduction Reduce(Node* node) override;
const char* reducer_name() const override { return "EscapeAnalysisReducer"; }
@@ -111,8 +113,6 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
NodeHashCache node_cache_;
ZoneSet<Node*> arguments_elements_;
Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisReducer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 2a096b6933..07587524a4 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -119,6 +119,9 @@ class VariableTracker {
public:
VariableTracker(JSGraph* graph, EffectGraphReducer* reducer, Zone* zone);
+ VariableTracker(const VariableTracker&) = delete;
+ VariableTracker& operator=(const VariableTracker&) = delete;
+
Variable NewVariable() { return Variable(next_variable_++); }
Node* Get(Variable var, Node* effect) { return table_.Get(effect).Get(var); }
Zone* zone() { return zone_; }
@@ -155,8 +158,6 @@ class VariableTracker {
EffectGraphReducer* reducer_;
int next_variable_ = 0;
TickCounter* const tick_counter_;
-
- DISALLOW_COPY_AND_ASSIGN(VariableTracker);
};
// Encapsulates the current state of the escape analysis reducer to preserve
@@ -170,6 +171,8 @@ class EscapeAnalysisTracker : public ZoneObject {
variable_states_(jsgraph, reducer, zone),
jsgraph_(jsgraph),
zone_(zone) {}
+ EscapeAnalysisTracker(const EscapeAnalysisTracker&) = delete;
+ EscapeAnalysisTracker& operator=(const EscapeAnalysisTracker&) = delete;
class Scope : public VariableTracker::Scope {
public:
@@ -276,8 +279,6 @@ class EscapeAnalysisTracker : public ZoneObject {
VirtualObject::Id next_object_id_ = 0;
JSGraph* const jsgraph_;
Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisTracker);
};
EffectGraphReducer::EffectGraphReducer(
@@ -559,9 +560,9 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
switch (op->opcode()) {
case IrOpcode::kAllocate: {
NumberMatcher size(current->ValueInput(0));
- if (!size.HasValue()) break;
- int size_int = static_cast<int>(size.Value());
- if (size_int != size.Value()) break;
+ if (!size.HasResolvedValue()) break;
+ int size_int = static_cast<int>(size.ResolvedValue());
+ if (size_int != size.ResolvedValue()) break;
if (const VirtualObject* vobject = current->InitVirtualObject(size_int)) {
// Initialize with dead nodes as a sentinel for uninitialized memory.
for (Variable field : *vobject) {
diff --git a/deps/v8/src/compiler/feedback-source.cc b/deps/v8/src/compiler/feedback-source.cc
index 8c3d175c28..a8a67f786f 100644
--- a/deps/v8/src/compiler/feedback-source.cc
+++ b/deps/v8/src/compiler/feedback-source.cc
@@ -17,9 +17,6 @@ FeedbackSource::FeedbackSource(Handle<FeedbackVector> vector_,
FeedbackSource::FeedbackSource(FeedbackVectorRef vector_, FeedbackSlot slot_)
: FeedbackSource(vector_.object(), slot_) {}
-FeedbackSource::FeedbackSource(FeedbackNexus const& nexus)
- : FeedbackSource(nexus.vector_handle(), nexus.slot()) {}
-
int FeedbackSource::index() const {
CHECK(IsValid());
return FeedbackVector::GetIndex(slot);
diff --git a/deps/v8/src/compiler/feedback-source.h b/deps/v8/src/compiler/feedback-source.h
index 8484acb455..29c22cde9c 100644
--- a/deps/v8/src/compiler/feedback-source.h
+++ b/deps/v8/src/compiler/feedback-source.h
@@ -17,7 +17,6 @@ struct FeedbackSource {
V8_EXPORT_PRIVATE FeedbackSource(Handle<FeedbackVector> vector_,
FeedbackSlot slot_);
FeedbackSource(FeedbackVectorRef vector_, FeedbackSlot slot_);
- explicit FeedbackSource(FeedbackNexus const& nexus);
bool IsValid() const { return !vector.is_null() && !slot.IsInvalid(); }
int index() const;
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 18f0df8c80..7d1a9dfb3a 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -89,6 +89,8 @@ class CallDescriptor;
class V8_EXPORT_PRIVATE Frame : public ZoneObject {
public:
explicit Frame(int fixed_frame_size_in_slots);
+ Frame(const Frame&) = delete;
+ Frame& operator=(const Frame&) = delete;
inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
inline int GetFixedSlotCount() const { return fixed_slot_count_; }
@@ -173,8 +175,6 @@ class V8_EXPORT_PRIVATE Frame : public ZoneObject {
int return_slot_count_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
-
- DISALLOW_COPY_AND_ASSIGN(Frame);
};
// Represents an offset from either the stack pointer or frame pointer.
diff --git a/deps/v8/src/compiler/globals.h b/deps/v8/src/compiler/globals.h
index fe96783c23..ff5b5a5732 100644
--- a/deps/v8/src/compiler/globals.h
+++ b/deps/v8/src/compiler/globals.h
@@ -71,4 +71,13 @@ inline std::ostream& operator<<(std::ostream& os,
} // namespace internal
} // namespace v8
+// Support for floating point parameters in calls to C.
+// It's currently enabled only for the platforms listed below. We don't plan
+// to add support for IA32, because it has a totally different approach
+// (using FP stack). As support is added to more platforms, please make sure
+// to list them here in order to enable tests of this functionality.
+#if defined(V8_TARGET_ARCH_X64)
+#define V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+#endif
+
#endif // V8_COMPILER_GLOBALS_H_
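The new V8_ENABLE_FP_PARAMS_IN_C_LINKAGE define gates floating-point parameter passing for C calls on the platforms that support it (currently x64 only), and the comment asks that tests be enabled per platform as support lands. A rough sketch of how such a platform-gated macro is typically consumed; the DEMO_ prefix and function body are invented for the example:

#include <cstdio>

// Stand-in for the real define from src/compiler/globals.h.
#if defined(__x86_64__) || defined(_M_X64)
#define DEMO_ENABLE_FP_PARAMS_IN_C_LINKAGE
#endif

// A C-linkage callback taking a double parameter directly.
extern "C" double Halve(double x) { return x * 0.5; }

int main() {
#ifdef DEMO_ENABLE_FP_PARAMS_IN_C_LINKAGE
  // Only exercised on targets where FP parameters are supported in the fast
  // C call linkage.
  std::printf("%f\n", Halve(3.0));
#else
  std::printf("fp params in C linkage not supported on this target\n");
#endif
  return 0;
}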
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 975efedf0f..aaa0644da6 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -351,6 +351,10 @@ Node* GraphAssembler::IntPtrConstant(intptr_t value) {
return AddClonedNode(mcgraph()->IntPtrConstant(value));
}
+Node* GraphAssembler::UintPtrConstant(uintptr_t value) {
+ return AddClonedNode(mcgraph()->UintPtrConstant(value));
+}
+
Node* GraphAssembler::Int32Constant(int32_t value) {
return AddClonedNode(mcgraph()->Int32Constant(value));
}
@@ -709,6 +713,18 @@ Node* GraphAssembler::LoadUnaligned(MachineType type, Node* object,
return AddNode(graph()->NewNode(op, object, offset, effect(), control()));
}
+Node* GraphAssembler::ProtectedStore(MachineRepresentation rep, Node* object,
+ Node* offset, Node* value) {
+ return AddNode(graph()->NewNode(machine()->ProtectedStore(rep), object,
+ offset, value, effect(), control()));
+}
+
+Node* GraphAssembler::ProtectedLoad(MachineType type, Node* object,
+ Node* offset) {
+ return AddNode(graph()->NewNode(machine()->ProtectedLoad(type), object,
+ offset, effect(), control()));
+}
+
Node* GraphAssembler::Retain(Node* buffer) {
return AddNode(graph()->NewNode(common()->Retain(), buffer, effect()));
}
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 1be52317c0..eb7f6cc3c0 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -47,6 +47,7 @@ class BasicBlock;
V(Float64ExtractLowWord32) \
V(Float64SilenceNaN) \
V(RoundFloat64ToInt32) \
+ V(TruncateFloat64ToFloat32) \
V(TruncateFloat64ToInt64) \
V(TruncateFloat64ToWord32) \
V(TruncateInt64ToInt32) \
@@ -236,6 +237,7 @@ class V8_EXPORT_PRIVATE GraphAssembler {
// Value creation.
Node* IntPtrConstant(intptr_t value);
+ Node* UintPtrConstant(uintptr_t value);
Node* Uint32Constant(uint32_t value);
Node* Int32Constant(int32_t value);
Node* Int64Constant(int64_t value);
@@ -303,6 +305,10 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* value);
Node* LoadUnaligned(MachineType type, Node* object, Node* offset);
+ Node* ProtectedStore(MachineRepresentation rep, Node* object, Node* offset,
+ Node* value);
+ Node* ProtectedLoad(MachineType type, Node* object, Node* offset);
+
Node* Retain(Node* buffer);
Node* UnsafePointerAdd(Node* base, Node* external);
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 95454098d5..171033fe53 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -139,6 +139,9 @@ class V8_EXPORT_PRIVATE GraphReducer
JSHeapBroker* broker, Node* dead = nullptr);
~GraphReducer() override;
+ GraphReducer(const GraphReducer&) = delete;
+ GraphReducer& operator=(const GraphReducer&) = delete;
+
Graph* graph() const { return graph_; }
void AddReducer(Reducer* reducer);
@@ -190,8 +193,6 @@ class V8_EXPORT_PRIVATE GraphReducer
ZoneStack<NodeState> stack_;
TickCounter* const tick_counter_;
JSHeapBroker* const broker_;
-
- DISALLOW_COPY_AND_ASSIGN(GraphReducer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/graph-trimmer.h b/deps/v8/src/compiler/graph-trimmer.h
index 5a5f525ef4..8e420226d1 100644
--- a/deps/v8/src/compiler/graph-trimmer.h
+++ b/deps/v8/src/compiler/graph-trimmer.h
@@ -20,6 +20,8 @@ class V8_EXPORT_PRIVATE GraphTrimmer final {
public:
GraphTrimmer(Zone* zone, Graph* graph);
~GraphTrimmer();
+ GraphTrimmer(const GraphTrimmer&) = delete;
+ GraphTrimmer& operator=(const GraphTrimmer&) = delete;
// Trim nodes in the {graph} that are not reachable from {graph->end()}.
void TrimGraph();
@@ -50,8 +52,6 @@ class V8_EXPORT_PRIVATE GraphTrimmer final {
Graph* const graph_;
NodeMarker<bool> is_live_;
NodeVector live_;
-
- DISALLOW_COPY_AND_ASSIGN(GraphTrimmer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 36372f5d02..c633f4fa62 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -277,6 +277,8 @@ class JSONGraphNodeWriter {
positions_(positions),
origins_(origins),
first_node_(true) {}
+ JSONGraphNodeWriter(const JSONGraphNodeWriter&) = delete;
+ JSONGraphNodeWriter& operator=(const JSONGraphNodeWriter&) = delete;
void Print() {
for (Node* const node : all_.reachable) PrintNode(node);
@@ -349,8 +351,6 @@ class JSONGraphNodeWriter {
const SourcePositionTable* positions_;
const NodeOriginTable* origins_;
bool first_node_;
-
- DISALLOW_COPY_AND_ASSIGN(JSONGraphNodeWriter);
};
@@ -358,6 +358,8 @@ class JSONGraphEdgeWriter {
public:
JSONGraphEdgeWriter(std::ostream& os, Zone* zone, const Graph* graph)
: os_(os), all_(zone, graph, false), first_edge_(true) {}
+ JSONGraphEdgeWriter(const JSONGraphEdgeWriter&) = delete;
+ JSONGraphEdgeWriter& operator=(const JSONGraphEdgeWriter&) = delete;
void Print() {
for (Node* const node : all_.reachable) PrintEdges(node);
@@ -400,8 +402,6 @@ class JSONGraphEdgeWriter {
std::ostream& os_;
AllNodes all_;
bool first_edge_;
-
- DISALLOW_COPY_AND_ASSIGN(JSONGraphEdgeWriter);
};
std::ostream& operator<<(std::ostream& os, const GraphAsJSON& ad) {
@@ -420,6 +420,8 @@ std::ostream& operator<<(std::ostream& os, const GraphAsJSON& ad) {
class GraphC1Visualizer {
public:
GraphC1Visualizer(std::ostream& os, Zone* zone); // NOLINT
+ GraphC1Visualizer(const GraphC1Visualizer&) = delete;
+ GraphC1Visualizer& operator=(const GraphC1Visualizer&) = delete;
void PrintCompilation(const OptimizedCompilationInfo* info);
void PrintSchedule(const char* phase, const Schedule* schedule,
@@ -470,8 +472,6 @@ class GraphC1Visualizer {
std::ostream& os_;
int indent_;
Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(GraphC1Visualizer);
};
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index 8d2acfded7..c8af078895 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -34,6 +34,8 @@ using NodeId = uint32_t;
class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit Graph(Zone* zone);
+ Graph(const Graph&) = delete;
+ Graph& operator=(const Graph&) = delete;
// Scope used when creating a subgraph for inlining. Automatically preserves
// the original start and end nodes of the graph, and resets them when you
@@ -46,13 +48,13 @@ class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
graph_->SetStart(start_);
graph_->SetEnd(end_);
}
+ SubgraphScope(const SubgraphScope&) = delete;
+ SubgraphScope& operator=(const SubgraphScope&) = delete;
private:
Graph* const graph_;
Node* const start_;
Node* const end_;
-
- DISALLOW_COPY_AND_ASSIGN(SubgraphScope);
};
// Base implementation used by all factory methods.
@@ -105,8 +107,6 @@ class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
Mark mark_max_;
NodeId next_node_id_;
ZoneVector<GraphDecorator*> decorators_;
-
- DISALLOW_COPY_AND_ASSIGN(Graph);
};
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index f66b678632..b268593a48 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -13,6 +13,7 @@
#include "src/utils/boxed-float.h"
namespace v8 {
+
class CFunctionInfo;
namespace internal {
@@ -32,6 +33,7 @@ class NativeContext;
class ScriptContextTable;
namespace compiler {
+
// Whether we are loading a property or storing to a property.
// For a store during literal creation, do not walk up the prototype chain.
enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
@@ -58,9 +60,13 @@ enum class OddballType : uint8_t {
#define HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(V) \
/* Subtypes of FixedArray */ \
V(ObjectBoilerplateDescription) \
+ V(ScopeInfo) \
+ /* Subtypes of Name */ \
+ V(Symbol) \
/* Subtypes of HeapObject */ \
V(AccessorInfo) \
V(ArrayBoilerplateDescription) \
+ V(CallHandlerInfo) \
V(Cell) \
V(TemplateObjectDescription)
@@ -80,7 +86,6 @@ enum class OddballType : uint8_t {
V(NativeContext) \
/* Subtypes of FixedArray */ \
V(Context) \
- V(ScopeInfo) \
V(ScriptContextTable) \
/* Subtypes of FixedArrayBase */ \
V(BytecodeArray) \
@@ -89,13 +94,11 @@ enum class OddballType : uint8_t {
/* Subtypes of Name */ \
V(InternalizedString) \
V(String) \
- V(Symbol) \
/* Subtypes of JSReceiver */ \
V(JSObject) \
/* Subtypes of HeapObject */ \
V(AllocationSite) \
V(BigInt) \
- V(CallHandlerInfo) \
V(Code) \
V(DescriptorArray) \
V(FeedbackCell) \
@@ -316,7 +319,7 @@ class JSBoundFunctionRef : public JSObjectRef {
Handle<JSBoundFunction> object() const;
- void Serialize();
+ bool Serialize();
bool serialized() const;
// The following are available only after calling Serialize().
@@ -347,6 +350,7 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
NativeContextRef native_context() const;
SharedFunctionInfoRef shared() const;
FeedbackVectorRef feedback_vector() const;
+ FeedbackCellRef raw_feedback_cell() const;
CodeRef code() const;
int InitialMapInstanceSizeWithMinSlack() const;
};
@@ -772,8 +776,7 @@ class ScopeInfoRef : public HeapObjectRef {
int ContextLength() const;
bool HasOuterScopeInfo() const;
- int Flags() const;
- bool HasContextExtension() const;
+ bool HasContextExtensionSlot() const;
// Only serialized via SerializeScopeInfoChain.
ScopeInfoRef OuterScopeInfo() const;
@@ -791,8 +794,6 @@ class ScopeInfoRef : public HeapObjectRef {
V(bool, HasBuiltinId) \
V(bool, construct_as_builtin) \
V(bool, HasBytecodeArray) \
- V(bool, is_safe_to_skip_arguments_adaptor) \
- V(SharedFunctionInfo::Inlineability, GetInlineability) \
V(int, StartPosition) \
V(bool, is_compiled) \
V(bool, IsUserJavaScript)
@@ -806,6 +807,7 @@ class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
int builtin_id() const;
int context_header_size() const;
BytecodeArrayRef GetBytecodeArray() const;
+ SharedFunctionInfo::Inlineability GetInlineability() const;
#define DECL_ACCESSOR(type, name) type name() const;
BROKER_SFI_FIELDS(DECL_ACCESSOR)
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index a6bbd563a0..2ef7d8af32 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -680,9 +680,9 @@ void Int64Lowering::LowerNode(Node* node) {
? GetReplacementLow(node->InputAt(1))
: node->InputAt(1);
Int32Matcher m(shift);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
// Precondition: 0 <= shift < 64.
- int32_t shift_value = m.Value() & 0x3F;
+ int32_t shift_value = m.ResolvedValue() & 0x3F;
if (shift_value == 0) {
ReplaceNode(node, GetReplacementLow(input),
GetReplacementHigh(input));
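
The hunk above renames HasValue/Value to HasResolvedValue/ResolvedValue and keeps the shift-amount mask (& 0x3F) that enforces the 0 <= shift < 64 precondition when 64-bit shifts are lowered to pairs of 32-bit words. The following is a hedged, self-contained sketch of that lowering idea; ShiftLeft64 and its (low, high) convention are illustrative and not the Int64Lowering implementation.

#include <cstdint>
#include <cstdio>
#include <utility>

// Illustrative sketch (not the V8 lowering itself): a 64-bit left shift
// expressed on a (low, high) pair of 32-bit words, with the shift amount
// masked to the 0..63 range as in the precondition above.
std::pair<uint32_t, uint32_t> ShiftLeft64(uint32_t low, uint32_t high,
                                          int32_t shift) {
  int32_t s = shift & 0x3F;  // Only the low 6 bits of the shift matter.
  if (s == 0) return {low, high};
  if (s < 32) {
    uint32_t new_low = low << s;
    uint32_t new_high = (high << s) | (low >> (32 - s));
    return {new_low, new_high};
  }
  return {0u, low << (s - 32)};
}

int main() {
  uint64_t value = 0x0000000180000001ULL;
  auto [low, high] = ShiftLeft64(static_cast<uint32_t>(value),
                                 static_cast<uint32_t>(value >> 32), 3);
  uint64_t recombined = (static_cast<uint64_t>(high) << 32) | low;
  std::printf("%llx\n", static_cast<unsigned long long>(recombined));
  // Prints c00000008, i.e. value << 3.
  return 0;
}
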
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 94a6b3a7c7..7088fb0d43 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -157,6 +157,9 @@ class JSCallReducerAssembler : public JSGraphAssembler {
gasm_->Bind(&merge);
}
+ IfBuilder0(const IfBuilder0&) = delete;
+ IfBuilder0& operator=(const IfBuilder0&) = delete;
+
private:
JSGraphAssembler* const gasm_;
const TNode<Boolean> cond_;
@@ -166,8 +169,6 @@ class JSCallReducerAssembler : public JSGraphAssembler {
BranchHint hint_ = BranchHint::kNone;
VoidGenerator0 then_body_;
VoidGenerator0 else_body_;
-
- DISALLOW_COPY_AND_ASSIGN(IfBuilder0);
};
IfBuilder0 If(TNode<Boolean> cond) { return {this, cond, false}; }
@@ -882,14 +883,13 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
class FastApiCallReducerAssembler : public JSCallReducerAssembler {
public:
FastApiCallReducerAssembler(
- JSCallReducer* reducer, Node* node, Address c_function,
- const CFunctionInfo* c_signature,
+ JSCallReducer* reducer, Node* node,
const FunctionTemplateInfoRef function_template_info, Node* receiver,
Node* holder, const SharedFunctionInfoRef shared, Node* target,
const int arity, Node* effect)
: JSCallReducerAssembler(reducer, node),
- c_function_(c_function),
- c_signature_(c_signature),
+ c_function_(function_template_info.c_function()),
+ c_signature_(function_template_info.c_signature()),
function_template_info_(function_template_info),
receiver_(receiver),
holder_(holder),
@@ -2640,8 +2640,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
// recomputed even if the actual value of the object changes.
// This mirrors the checks done in builtins-function-gen.cc at
// runtime otherwise.
- int minimum_nof_descriptors = i::Max(JSFunction::kLengthDescriptorIndex,
- JSFunction::kNameDescriptorIndex) +
+ int minimum_nof_descriptors = std::max({JSFunction::kLengthDescriptorIndex,
+ JSFunction::kNameDescriptorIndex}) +
1;
if (receiver_map.NumberOfOwnDescriptors() < minimum_nof_descriptors) {
return inference.NoChange();
@@ -2725,7 +2725,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
// to ensure any exception is thrown in the correct context.
Node* context;
HeapObjectMatcher m(target);
- if (m.HasValue()) {
+ if (m.HasResolvedValue() && m.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
if (should_disallow_heap_access() && !function.serialized()) {
TRACE_BROKER_MISSING(broker(), "Serialize call on function " << function);
@@ -2901,10 +2901,10 @@ Reduction JSCallReducer::ReduceObjectPrototypeHasOwnProperty(Node* node) {
// Object.prototype.hasOwnProperty does an implicit ToObject anyway, and
// these operations are not observable.
if (name->opcode() == IrOpcode::kJSForInNext) {
- ForInMode const mode = ForInModeOf(name->op());
- if (mode != ForInMode::kGeneric) {
- Node* object = NodeProperties::GetValueInput(name, 0);
- Node* cache_type = NodeProperties::GetValueInput(name, 2);
+ JSForInNextNode n(name);
+ if (n.Parameters().mode() != ForInMode::kGeneric) {
+ Node* object = n.receiver();
+ Node* cache_type = n.cache_type();
if (object->opcode() == IrOpcode::kJSToObject) {
object = NodeProperties::GetValueInput(object, 0);
}
@@ -3453,6 +3453,54 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
return ReplaceWithSubgraph(&a, subgraph);
}
+#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+namespace {
+bool HasFPParamsInSignature(const CFunctionInfo* c_signature) {
+ for (unsigned int i = 0; i < c_signature->ArgumentCount(); ++i) {
+ if (c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kFloat32 ||
+ c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kFloat64) {
+ return true;
+ }
+ }
+ return false;
+}
+} // namespace
+#endif
+
+#ifndef V8_TARGET_ARCH_64_BIT
+namespace {
+bool Has64BitIntegerParamsInSignature(const CFunctionInfo* c_signature) {
+ for (unsigned int i = 0; i < c_signature->ArgumentCount(); ++i) {
+ if (c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kInt64 ||
+ c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kUint64) {
+ return true;
+ }
+ }
+ return false;
+}
+} // namespace
+#endif
+
+bool CanOptimizeFastCall(
+ const FunctionTemplateInfoRef& function_template_info) {
+ const CFunctionInfo* c_signature = function_template_info.c_signature();
+
+ bool optimize_to_fast_call =
+ FLAG_turbo_fast_api_calls &&
+ function_template_info.c_function() != kNullAddress;
+#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+ optimize_to_fast_call =
+ optimize_to_fast_call && !HasFPParamsInSignature(c_signature);
+#else
+ USE(c_signature);
+#endif
+#ifndef V8_TARGET_ARCH_64_BIT
+ optimize_to_fast_call =
+ optimize_to_fast_call && !Has64BitIntegerParamsInSignature(c_signature);
+#endif
+ return optimize_to_fast_call;
+}
+
Reduction JSCallReducer::ReduceCallApiFunction(
Node* node, const SharedFunctionInfoRef& shared) {
DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
@@ -3624,13 +3672,9 @@ Reduction JSCallReducer::ReduceCallApiFunction(
return NoChange();
}
- Address c_function = function_template_info.c_function();
-
- if (FLAG_turbo_fast_api_calls && c_function != kNullAddress) {
- const CFunctionInfo* c_signature = function_template_info.c_signature();
- FastApiCallReducerAssembler a(this, node, c_function, c_signature,
- function_template_info, receiver, holder,
- shared, target, argc, effect);
+ if (CanOptimizeFastCall(function_template_info)) {
+ FastApiCallReducerAssembler a(this, node, function_template_info, receiver,
+ holder, shared, target, argc, effect);
Node* fast_call_subgraph = a.ReduceFastApiCall();
ReplaceWithSubgraph(&a, fast_call_subgraph);
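
CanOptimizeFastCall, introduced above, only keeps the fast API call path when the C signature contains no parameter kinds the current build cannot pass: floating-point parameters without V8_ENABLE_FP_PARAMS_IN_C_LINKAGE, and 64-bit integer parameters on non-64-bit targets. The sketch below shows the general shape of such a signature scan; ArgType, FakeSignature and HasUnsupportedParams are made-up names for illustration, not V8 API.

#include <algorithm>
#include <cstdio>
#include <vector>

// Hedged sketch with invented types: the shape of the check above. A fast C
// call is only taken when the signature contains no argument kinds that the
// current calling convention cannot handle.
enum class ArgType { kInt32, kUint32, kInt64, kUint64, kFloat32, kFloat64 };

struct FakeSignature {
  std::vector<ArgType> args;
};

bool HasUnsupportedParams(const FakeSignature& sig, bool fp_params_ok,
                          bool int64_params_ok) {
  return std::any_of(sig.args.begin(), sig.args.end(), [&](ArgType t) {
    bool is_fp = t == ArgType::kFloat32 || t == ArgType::kFloat64;
    bool is_int64 = t == ArgType::kInt64 || t == ArgType::kUint64;
    return (is_fp && !fp_params_ok) || (is_int64 && !int64_params_ok);
  });
}

int main() {
  FakeSignature sig{{ArgType::kInt32, ArgType::kFloat64}};
  // On a target without FP params in C linkage this signature disqualifies
  // the fast call; with FP support it would not.
  std::printf("%d\n", HasUnsupportedParams(sig, /*fp_params_ok=*/false,
                                           /*int64_params_ok=*/true));
  return 0;
}
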
@@ -3934,7 +3978,7 @@ namespace {
bool ShouldUseCallICFeedback(Node* node) {
HeapObjectMatcher m(node);
- if (m.HasValue() || m.IsCheckClosure() || m.IsJSCreateClosure()) {
+ if (m.HasResolvedValue() || m.IsCheckClosure() || m.IsJSCreateClosure()) {
// Don't use CallIC feedback when we know the function
// being called, i.e. either know the closure itself or
// at least the SharedFunctionInfo.
@@ -3970,6 +4014,8 @@ bool JSCallReducer::IsBuiltinOrApiFunction(JSFunctionRef function) const {
}
Reduction JSCallReducer::ReduceJSCall(Node* node) {
+ if (broker()->StackHasOverflowed()) return NoChange();
+
JSCallNode n(node);
CallParameters const& p = n.Parameters();
Node* target = n.target();
@@ -3979,7 +4025,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Try to specialize JSCall {node}s with constant {target}s.
HeapObjectMatcher m(target);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
ObjectRef target_ref = m.Ref(broker());
if (target_ref.IsJSFunction()) {
JSFunctionRef function = target_ref.AsJSFunction();
@@ -4104,7 +4150,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
if (feedback_target.has_value() && feedback_target->map().is_callable()) {
Node* target_function = jsgraph()->Constant(*feedback_target);
- if (FLAG_turboprop) {
+ if (broker()->is_turboprop()) {
if (!feedback_target->IsJSFunction()) return NoChange();
if (!IsBuiltinOrApiFunction(feedback_target->AsJSFunction())) {
return NoChange();
@@ -4138,7 +4184,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return NoChange();
}
- if (FLAG_turboprop &&
+ if (broker()->is_turboprop() &&
!feedback_vector.shared_function_info().HasBuiltinId()) {
return NoChange();
}
@@ -4578,7 +4624,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
arity, feedback_target->AsAllocationSite().object()));
return Changed(node);
} else if (feedback_target.has_value() &&
- !HeapObjectMatcher(new_target).HasValue() &&
+ !HeapObjectMatcher(new_target).HasResolvedValue() &&
feedback_target->map().is_constructor()) {
Node* new_target_feedback = jsgraph()->Constant(*feedback_target);
@@ -4603,7 +4649,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Try to specialize JSConstruct {node}s with constant {target}s.
HeapObjectMatcher m(target);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
HeapObjectRef target_ref = m.Ref(broker());
// Raise a TypeError if the {target} is not a constructor.
@@ -4659,7 +4705,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// constructor), {value} will be ignored and therefore we can lower
// to {JSCreate}. See https://tc39.es/ecma262/#sec-object-value.
HeapObjectMatcher mnew_target(new_target);
- if (mnew_target.HasValue() &&
+ if (mnew_target.HasResolvedValue() &&
!mnew_target.Ref(broker()).equals(function)) {
// Drop the value inputs.
node->RemoveInput(n.FeedbackVectorIndex());
@@ -4965,7 +5011,7 @@ Reduction JSCallReducer::ReduceForInsufficientFeedback(
// TODO(mythria): May be add additional flags to specify if we need to deopt
// on calls / construct rather than checking for TurboProp here. We may need
// it for NativeContextIndependent code too.
- if (FLAG_turboprop) return NoChange();
+ if (broker()->is_turboprop()) return NoChange();
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -5999,7 +6045,7 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
Node* position = n.ArgumentOr(1, jsgraph()->ZeroConstant());
HeapObjectMatcher m(search_string);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
ObjectRef target_ref = m.Ref(broker());
if (target_ref.IsString()) {
StringRef str = target_ref.AsString();
@@ -7323,7 +7369,7 @@ Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
// Check that the {offset} is within range for the {receiver}.
HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
+ if (m.HasResolvedValue() && m.Ref(broker()).IsJSDataView()) {
// We only deal with DataViews here whose [[ByteLength]] is at least
// {element_size}, as for all other DataViews it'll be out-of-bounds.
JSDataViewRef dataview = m.Ref(broker()).AsJSDataView();
@@ -7602,7 +7648,7 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
// Add proper dependencies on the {regexp}s [[Prototype]]s.
dependencies()->DependOnStablePrototypeChains(
- ai_exec.receiver_maps(), kStartAtPrototype,
+ ai_exec.lookup_start_object_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
} else {
return inference.NoChange();
@@ -7688,7 +7734,7 @@ Reduction JSCallReducer::ReduceBigIntAsUintN(Node* node) {
NumberMatcher matcher(bits);
if (matcher.IsInteger() && matcher.IsInRange(0, 64)) {
- const int bits_value = static_cast<int>(matcher.Value());
+ const int bits_value = static_cast<int>(matcher.ResolvedValue());
value = effect = graph()->NewNode(simplified()->CheckBigInt(p.feedback()),
value, effect, control);
value = graph()->NewNode(simplified()->BigIntAsUintN(bits_value), value);
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index f74bd5b6de..2da6d8d732 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -44,6 +44,8 @@ class V8_EXPORT_PRIVATE JSContextSpecialization final : public AdvancedReducer {
outer_(outer),
closure_(closure),
broker_(broker) {}
+ JSContextSpecialization(const JSContextSpecialization&) = delete;
+ JSContextSpecialization& operator=(const JSContextSpecialization&) = delete;
const char* reducer_name() const override {
return "JSContextSpecialization";
@@ -72,8 +74,6 @@ class V8_EXPORT_PRIVATE JSContextSpecialization final : public AdvancedReducer {
Maybe<OuterContext> outer_;
MaybeHandle<JSFunction> closure_;
JSHeapBroker* const broker_;
-
- DISALLOW_COPY_AND_ASSIGN(JSContextSpecialization);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 1f3169fad3..619475ef7f 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -28,7 +28,6 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/template-objects.h"
-#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
@@ -1496,7 +1495,7 @@ Node* JSCreateLowering::AllocateAliasedArguments(
}
// Calculate number of argument values being aliased/mapped.
- int mapped_count = Min(argument_count, parameter_count);
+ int mapped_count = std::min(argument_count, parameter_count);
*has_aliased_arguments = true;
// Prepare an iterator over argument values recorded in the frame state.
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 81bafa6183..0b38bd538d 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -7,6 +7,7 @@
#include "src/ast/ast.h"
#include "src/builtins/builtins-constructor.h"
#include "src/codegen/code-factory.h"
+#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
@@ -15,6 +16,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/processed-feedback.h"
+#include "src/compiler/simplified-operator.h"
#include "src/objects/feedback-cell.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/scope-info.h"
@@ -316,8 +318,10 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
}
void JSGenericLowering::LowerJSLoadNamedFromSuper(Node* node) {
+ // TODO(marja, v8:9237): Call a builtin which collects feedback.
JSLoadNamedFromSuperNode n(node);
NamedAccess const& p = n.Parameters();
+ node->RemoveInput(2); // Feedback vector
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.name()));
ReplaceWithRuntimeCall(node, Runtime::kLoadFromSuper);
}
@@ -480,7 +484,21 @@ void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
}
void JSGenericLowering::LowerJSGetSuperConstructor(Node* node) {
- ReplaceWithBuiltinCall(node, Builtins::kGetSuperConstructor);
+ Node* active_function = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* function_map = effect = graph()->NewNode(
+ jsgraph()->simplified()->LoadField(AccessBuilder::ForMap()),
+ active_function, effect, control);
+
+ RelaxControls(node);
+ node->ReplaceInput(0, function_map);
+ node->ReplaceInput(1, effect);
+ node->ReplaceInput(2, control);
+ node->TrimInputCount(3);
+ NodeProperties::ChangeOp(node, jsgraph()->simplified()->LoadField(
+ AccessBuilder::ForMapPrototype()));
}
void JSGenericLowering::LowerJSHasInPrototypeChain(Node* node) {
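
The new LowerJSGetSuperConstructor above replaces the builtin call with two dependent field loads: first the map of the active function, then the prototype slot of that map. A toy layout showing the same two loads is sketched below; FakeMap and FakeJSFunction are invented types and do not reflect real V8 object layouts.

#include <cstdio>

// Illustrative data layout (not V8's): the lowering above turns
// %GetSuperConstructor(fn) into two dependent field loads, first the map of
// the active function, then the prototype slot of that map.
struct FakeMap {
  const void* prototype;  // the function's [[Prototype]], i.e. the super ctor
};

struct FakeJSFunction {
  const FakeMap* map;
};

const void* GetSuperConstructor(const FakeJSFunction* active_function) {
  const FakeMap* map = active_function->map;  // LoadField(ForMap())
  return map->prototype;                      // LoadField(ForMapPrototype())
}

int main() {
  int base_constructor = 0;  // stands in for the super constructor object
  FakeMap map_of_derived{&base_constructor};
  FakeJSFunction derived{&map_of_derived};
  std::printf("%d\n", GetSuperConstructor(&derived) == &base_constructor);
  return 0;
}
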
@@ -828,9 +846,7 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
Node* slot = jsgraph()->Int32Constant(p.feedback().index());
Node* receiver = jsgraph()->UndefinedConstant();
-#ifdef V8_REVERSE_JSARGS
Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
-#endif
// Register argument inputs are followed by stack argument inputs (such as
// feedback_vector). Both are listed in ascending order. Note that
// the receiver is implicitly placed on the stack and is thus inserted
@@ -839,16 +855,10 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 3, stub_arity);
node->InsertInput(zone(), 4, slot);
-#ifdef V8_REVERSE_JSARGS
node->InsertInput(zone(), 5, feedback_vector);
node->InsertInput(zone(), 6, receiver);
// After: {code, target, new_target, arity, slot, vector, receiver,
// ...args}.
-#else
- node->InsertInput(zone(), 5, receiver);
- // After: {code, target, new_target, arity, slot, receiver, ...args,
- // vector}.
-#endif
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
@@ -897,9 +907,7 @@ void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* receiver = jsgraph()->UndefinedConstant();
Node* slot = jsgraph()->Int32Constant(p.feedback().index());
-#ifdef V8_REVERSE_JSARGS
Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
-#endif
// Register argument inputs are followed by stack argument inputs (such as
// feedback_vector). Both are listed in ascending order. Note that
// the receiver is implicitly placed on the stack and is thus inserted
@@ -907,16 +915,10 @@ void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
// TODO(jgruber): Implement a simpler way to specify these mutations.
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 4, slot);
-#ifdef V8_REVERSE_JSARGS
node->InsertInput(zone(), 5, feedback_vector);
node->InsertInput(zone(), 6, receiver);
// After: {code, target, new_target, arguments_list, slot, vector,
// receiver}.
-#else
- node->InsertInput(zone(), 5, receiver);
- // After: {code, target, new_target, arguments_list, slot, receiver,
- // vector}.
-#endif
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
@@ -972,10 +974,8 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
// on the stack here.
Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
Node* receiver = jsgraph()->UndefinedConstant();
-#ifdef V8_REVERSE_JSARGS
Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
Node* spread = node->RemoveInput(n.LastArgumentIndex());
-#endif
// Register argument inputs are followed by stack argument inputs (such as
// feedback_vector). Both are listed in ascending order. Note that
@@ -985,17 +985,11 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 3, stub_arity);
node->InsertInput(zone(), 4, slot);
-#ifdef V8_REVERSE_JSARGS
node->InsertInput(zone(), 5, spread);
node->InsertInput(zone(), 6, feedback_vector);
node->InsertInput(zone(), 7, receiver);
// After: {code, target, new_target, arity, slot, spread, vector, receiver,
// ...args}.
-#else
- node->InsertInput(zone(), 5, receiver);
- // After: {code, target, new_target, arity, slot, receiver, ...args, spread,
- // vector}.
-#endif
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
@@ -1179,20 +1173,14 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
// Shuffling inputs.
// Before: {target, receiver, ...args, spread, vector}.
-#ifdef V8_REVERSE_JSARGS
Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
-#endif
Node* spread = node->RemoveInput(n.LastArgumentIndex());
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
node->InsertInput(zone(), 3, spread);
node->InsertInput(zone(), 4, slot);
-#ifdef V8_REVERSE_JSARGS
node->InsertInput(zone(), 5, feedback_vector);
// After: {code, target, arity, spread, slot, vector, receiver, ...args}.
-#else
- // After: {code, target, arity, spread, slot, receiver, ...args, vector}.
-#endif
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
@@ -1230,12 +1218,79 @@ void JSGenericLowering::LowerJSCallRuntime(Node* node) {
ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
}
-void JSGenericLowering::LowerJSForInNext(Node* node) {
- UNREACHABLE(); // Eliminated in typed lowering.
+void JSGenericLowering::LowerJSForInPrepare(Node* node) {
+ JSForInPrepareNode n(node);
+ Effect effect(node); // {node} is kept in the effect chain.
+ Control control = n.control(); // .. but not in the control chain.
+ Node* enumerator = n.enumerator();
+ Node* slot =
+ jsgraph()->UintPtrConstant(n.Parameters().feedback().slot.ToInt());
+
+ std::vector<Edge> use_edges;
+ for (Edge edge : node->use_edges()) use_edges.push_back(edge);
+
+ // {node} will be changed to a builtin call (see below). The returned value
+ // is a fixed array containing {cache_array} and {cache_length}.
+ // TODO(jgruber): This is awkward; what we really want is two return values,
+ // the {cache_array} and {cache_length}, or better yet three return values
+ // s.t. we can avoid the graph rewrites below. Builtin support for multiple
+ // return types is unclear though.
+
+ Node* result_fixed_array = node;
+ Node* cache_type = enumerator; // Just to clarify the rename.
+ Node* cache_array;
+ Node* cache_length;
+
+ cache_array = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), result_fixed_array,
+ jsgraph()->IntPtrConstant(FixedArray::OffsetOfElementAt(0) -
+ kHeapObjectTag),
+ effect, control);
+ cache_length = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), result_fixed_array,
+ jsgraph()->IntPtrConstant(FixedArray::OffsetOfElementAt(1) -
+ kHeapObjectTag),
+ effect, control);
+
+ // Update the uses of {node}.
+ for (Edge edge : use_edges) {
+ Node* const user = edge.from();
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ } else if (NodeProperties::IsControlEdge(edge)) {
+ edge.UpdateTo(control);
+ } else {
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ switch (ProjectionIndexOf(user->op())) {
+ case 0:
+ Replace(user, cache_type);
+ break;
+ case 1:
+ Replace(user, cache_array);
+ break;
+ case 2:
+ Replace(user, cache_length);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+
+ // Finally, change the original node into a builtin call. This happens here,
+ // after graph rewrites, since the Call does not have a control output and
+ // thus must not have any control uses. Any previously existing control
+ // outputs have been replaced by the graph rewrite above.
+ node->InsertInput(zone(), n.FeedbackVectorIndex(), slot);
+ ReplaceWithBuiltinCall(node, Builtins::kForInPrepare);
}
-void JSGenericLowering::LowerJSForInPrepare(Node* node) {
- UNREACHABLE(); // Eliminated in typed lowering.
+void JSGenericLowering::LowerJSForInNext(Node* node) {
+ JSForInNextNode n(node);
+ node->InsertInput(
+ zone(), 0,
+ jsgraph()->UintPtrConstant(n.Parameters().feedback().slot.ToInt()));
+ ReplaceWithBuiltinCall(node, Builtins::kForInNext);
}
void JSGenericLowering::LowerJSLoadMessage(Node* node) {
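
As the comment in LowerJSForInPrepare notes, the ForInPrepare builtin returns one fixed array packing {cache_array, cache_length}, and the former projection uses (0, 1, 2) are rewired to cache_type, cache_array and cache_length. The sketch below restates that unpacking with plain ints standing in for graph nodes; ForInPrepareResult, Unpack and ResolveProjection are illustrative names only.

#include <array>
#include <cassert>
#include <cstdio>

// Sketch of the unpacking scheme described in the comment above: the builtin
// hands back a single array packing {cache_array, cache_length}, and each
// former projection index is rewired to one of three values.
struct ForInPrepareResult {
  int cache_type;    // projection 0: the enumerator, passed through unchanged
  int cache_array;   // projection 1: element 0 of the returned fixed array
  int cache_length;  // projection 2: element 1 of the returned fixed array
};

ForInPrepareResult Unpack(int enumerator,
                          const std::array<int, 2>& fixed_array) {
  return {enumerator, fixed_array[0], fixed_array[1]};
}

int ResolveProjection(const ForInPrepareResult& r, int projection_index) {
  switch (projection_index) {
    case 0: return r.cache_type;
    case 1: return r.cache_array;
    case 2: return r.cache_length;
    default: assert(false); return 0;
  }
}

int main() {
  ForInPrepareResult r = Unpack(/*enumerator=*/7, {100, 3});
  std::printf("%d %d %d\n", ResolveProjection(r, 0), ResolveProjection(r, 1),
              ResolveProjection(r, 2));
  return 0;
}
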
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index a17b615b3b..e86bb594ba 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -34,6 +34,9 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
simplified_(simplified) {
}
+ JSGraph(const JSGraph&) = delete;
+ JSGraph& operator=(const JSGraph&) = delete;
+
// CEntryStubs are cached depending on the result size and other flags.
Node* CEntryStubConstant(int result_size,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
@@ -132,8 +135,6 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
// Internal helper to canonicalize a number constant.
Node* NumberConstant(double value);
-
- DISALLOW_COPY_AND_ASSIGN(JSGraph);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index e3e009bc45..f7193ec944 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -50,7 +50,7 @@ HEAP_BROKER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
-// There are three kinds of ObjectData values.
+// There are five kinds of ObjectData values.
//
// kSmi: The underlying V8 object is a Smi and the data is an instance of the
// base class (ObjectData), i.e. it's basically just the handle. Because the
@@ -65,6 +65,12 @@ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
// data is an instance of the base class (ObjectData), i.e. it basically
// carries no information other than the handle.
//
+// kNeverSerializedHeapObject: The underlying V8 object is a (potentially
+// mutable) HeapObject and the data is an instance of ObjectData. Its handle
+// must be persistent so that the GC can update it at a safepoint. Via this
+// handle, the object can be accessed concurrently with the main thread. For
+// this to be used, the flag --turbo-direct-heap-access must be on.
+//
// kUnserializedReadOnlyHeapObject: The underlying V8 object is a read-only
// HeapObject and the data is an instance of ObjectData. For
// ReadOnlyHeapObjects, it is OK to access heap even from off-thread, so
@@ -297,7 +303,9 @@ CallHandlerInfoData::CallHandlerInfoData(JSHeapBroker* broker,
ObjectData** storage,
Handle<CallHandlerInfo> object)
: HeapObjectData(broker, storage, object),
- callback_(v8::ToCData<Address>(object->callback())) {}
+ callback_(v8::ToCData<Address>(object->callback())) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+}
// These definitions are here in order to please the linker, which in debug mode
// sometimes requires static constants to be defined in .cc files.
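
The block comment near the top of this file enumerates the kinds of ObjectData and how each may be accessed from the compiler. The sketch below paraphrases that classification; the ObjectDataKind enumerators mirror the names in the comment, but AccessStrategy and its one-line descriptions are a rough illustration, not the broker's authoritative rules.

#include <cstdio>

// Hedged sketch: kind names follow the comment above, but the descriptions
// are a paraphrase for illustration only.
enum class ObjectDataKind {
  kSmi,
  kSerializedHeapObject,
  kUnserializedHeapObject,
  kNeverSerializedHeapObject,
  kUnserializedReadOnlyHeapObject,
};

const char* AccessStrategy(ObjectDataKind kind) {
  switch (kind) {
    case ObjectDataKind::kSmi:
      return "handle only; the value is a Smi";
    case ObjectDataKind::kSerializedHeapObject:
      return "use the data copied at serialization time";
    case ObjectDataKind::kUnserializedHeapObject:
      return "handle only; read the heap on the main thread";
    case ObjectDataKind::kNeverSerializedHeapObject:
      return "persistent handle; concurrent reads, gated on "
             "--turbo-direct-heap-access";
    case ObjectDataKind::kUnserializedReadOnlyHeapObject:
      return "read-only object; safe to read even off-thread";
  }
  return "";
}

int main() {
  std::printf("%s\n", AccessStrategy(
                          ObjectDataKind::kNeverSerializedHeapObject));
  return 0;
}
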
@@ -327,8 +335,14 @@ void FunctionTemplateInfoData::SerializeCallCode(JSHeapBroker* broker) {
TraceScope tracer(broker, this,
"FunctionTemplateInfoData::SerializeCallCode");
auto function_template_info = Handle<FunctionTemplateInfo>::cast(object());
- call_code_ = broker->GetOrCreateData(function_template_info->call_code());
- if (!call_code_->should_access_heap()) {
+ call_code_ =
+ broker->GetOrCreateData(function_template_info->call_code(kAcquireLoad));
+ if (call_code_->should_access_heap()) {
+ // TODO(mvstanton): When ObjectRef is in the never serialized list, this
+ // code can be removed.
+ broker->GetOrCreateData(
+ Handle<CallHandlerInfo>::cast(call_code_->object())->data());
+ } else {
call_code_->AsCallHandlerInfo()->Serialize(broker);
}
}
@@ -614,7 +628,7 @@ class JSBoundFunctionData : public JSObjectData {
JSBoundFunctionData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSBoundFunction> object);
- void Serialize(JSHeapBroker* broker);
+ bool Serialize(JSHeapBroker* broker);
bool serialized() const { return serialized_; }
ObjectData* bound_target_function() const { return bound_target_function_; }
@@ -650,6 +664,7 @@ class JSFunctionData : public JSObjectData {
ObjectData* initial_map() const { return initial_map_; }
ObjectData* prototype() const { return prototype_; }
ObjectData* shared() const { return shared_; }
+ ObjectData* raw_feedback_cell() const { return feedback_cell_; }
ObjectData* feedback_vector() const { return feedback_vector_; }
ObjectData* code() const { return code_; }
int initial_map_instance_size_with_min_slack() const {
@@ -672,6 +687,7 @@ class JSFunctionData : public JSObjectData {
ObjectData* prototype_ = nullptr;
ObjectData* shared_ = nullptr;
ObjectData* feedback_vector_ = nullptr;
+ ObjectData* feedback_cell_ = nullptr;
ObjectData* code_ = nullptr;
int initial_map_instance_size_with_min_slack_;
};
@@ -831,32 +847,44 @@ class StringData : public NameData {
// element access (s[i]). The first pair component is always less than
// {length_}. The second component is never nullptr.
ZoneVector<std::pair<uint32_t, ObjectData*>> chars_as_strings_;
-
- static constexpr int kMaxLengthForDoubleConversion = 23;
};
class SymbolData : public NameData {
public:
SymbolData(JSHeapBroker* broker, ObjectData** storage, Handle<Symbol> object)
- : NameData(broker, storage, object) {}
+ : NameData(broker, storage, object) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ }
};
+namespace {
+
+// String to double helper without heap allocation.
+base::Optional<double> StringToDouble(Handle<String> object) {
+ const int kMaxLengthForDoubleConversion = 23;
+ String string = *object;
+ int length = string.length();
+ if (length <= kMaxLengthForDoubleConversion) {
+ const int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
+ uc16 buffer[kMaxLengthForDoubleConversion];
+ String::WriteToFlat(*object, buffer, 0, length);
+ Vector<const uc16> v(buffer, length);
+ return StringToDouble(v, flags);
+ }
+ return base::nullopt;
+}
+
+} // namespace
+
StringData::StringData(JSHeapBroker* broker, ObjectData** storage,
Handle<String> object)
: NameData(broker, storage, object),
length_(object->length()),
first_char_(length_ > 0 ? object->Get(0) : 0),
+ to_number_(StringToDouble(object)),
is_external_string_(object->IsExternalString()),
is_seq_string_(object->IsSeqString()),
- chars_as_strings_(broker->zone()) {
- if (length_ <= kMaxLengthForDoubleConversion) {
- const int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
- uc16 buffer[kMaxLengthForDoubleConversion];
- String::WriteToFlat(*object, buffer, 0, length_);
- Vector<const uc16> v(buffer, length_);
- to_number_ = StringToDouble(v, flags);
- }
-}
+ chars_as_strings_(broker->zone()) {}
class InternalizedStringData : public StringData {
public:
@@ -941,8 +969,8 @@ bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
}
// Check the in-object properties.
- Handle<DescriptorArray> descriptors(boilerplate->map().instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> descriptors(
+ boilerplate->map().instance_descriptors(kRelaxedLoad), isolate);
for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != kField) continue;
@@ -1232,7 +1260,7 @@ namespace {
bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
DCHECK(!jsarray_map->is_dictionary_map());
Handle<Name> length_string = isolate->factory()->length_string();
- DescriptorArray descriptors = jsarray_map->instance_descriptors();
+ DescriptorArray descriptors = jsarray_map->instance_descriptors(kRelaxedLoad);
// TODO(jkummerow): We could skip the search and hardcode number == 0.
InternalIndex number = descriptors.Search(*length_string, *jsarray_map);
DCHECK(number.is_found());
@@ -1305,12 +1333,14 @@ void JSFunctionData::Serialize(JSHeapBroker* broker) {
DCHECK_NULL(initial_map_);
DCHECK_NULL(prototype_);
DCHECK_NULL(shared_);
+ DCHECK_NULL(feedback_cell_);
DCHECK_NULL(feedback_vector_);
DCHECK_NULL(code_);
context_ = broker->GetOrCreateData(function->context());
native_context_ = broker->GetOrCreateData(function->native_context());
shared_ = broker->GetOrCreateData(function->shared());
+ feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell());
feedback_vector_ = has_feedback_vector()
? broker->GetOrCreateData(function->feedback_vector())
: nullptr;
@@ -1488,23 +1518,37 @@ JSBoundFunctionData::JSBoundFunctionData(JSHeapBroker* broker,
Handle<JSBoundFunction> object)
: JSObjectData(broker, storage, object) {}
-void JSBoundFunctionData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
- serialized_ = true;
+bool JSBoundFunctionData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return true;
+ if (broker->StackHasOverflowed()) return false;
TraceScope tracer(broker, this, "JSBoundFunctionData::Serialize");
Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object());
+ // We don't immediately set {serialized_} in order to correctly handle the
+ // case where a recursive call to this method reaches the stack limit.
+
DCHECK_NULL(bound_target_function_);
bound_target_function_ =
broker->GetOrCreateData(function->bound_target_function());
+ bool serialized_nested = true;
if (!bound_target_function_->should_access_heap()) {
if (bound_target_function_->IsJSBoundFunction()) {
- bound_target_function_->AsJSBoundFunction()->Serialize(broker);
+ serialized_nested =
+ bound_target_function_->AsJSBoundFunction()->Serialize(broker);
} else if (bound_target_function_->IsJSFunction()) {
bound_target_function_->AsJSFunction()->Serialize(broker);
}
}
+ if (!serialized_nested) {
+ // We couldn't serialize all nested bound functions due to stack
+ // overflow. Give up.
+ DCHECK(!serialized_);
+ bound_target_function_ = nullptr; // Reset to sync with serialized_.
+ return false;
+ }
+
+ serialized_ = true;
DCHECK_NULL(bound_arguments_);
bound_arguments_ = broker->GetOrCreateData(function->bound_arguments());
@@ -1514,6 +1558,8 @@ void JSBoundFunctionData::Serialize(JSHeapBroker* broker) {
DCHECK_NULL(bound_this_);
bound_this_ = broker->GetOrCreateData(function->bound_this());
+
+ return true;
}
JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
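
JSBoundFunctionData::Serialize now returns a bool and deliberately postpones setting serialized_ until every nested bound target has been serialized, so a stack-limit bailout leaves the object cleanly unserialized and retryable. A minimal sketch of that failure-propagation pattern follows, with an explicit depth counter standing in for the broker's StackHasOverflowed() check; BoundFunction here is an invented type.

#include <cstdio>
#include <memory>

// Minimal sketch of the failure-propagation pattern above (not broker code):
// a recursive Serialize() that bails out when a guard trips, and only commits
// serialized_ after every nested call has succeeded.
struct BoundFunction {
  std::unique_ptr<BoundFunction> bound_target;  // may itself be bound
  bool serialized_ = false;

  bool Serialize(int remaining_depth) {
    if (serialized_) return true;
    if (remaining_depth == 0) return false;  // stands in for a stack check

    bool nested_ok = true;
    if (bound_target != nullptr) {
      nested_ok = bound_target->Serialize(remaining_depth - 1);
    }
    if (!nested_ok) {
      // Leave this node unserialized so a later attempt can retry cleanly.
      return false;
    }
    serialized_ = true;
    return true;
  }
};

int main() {
  BoundFunction outer;
  outer.bound_target = std::make_unique<BoundFunction>();
  std::printf("%d\n", outer.Serialize(/*remaining_depth=*/1));  // 0: gave up
  std::printf("%d\n", outer.Serialize(/*remaining_depth=*/8));  // 1: succeeded
  return 0;
}
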
@@ -1697,17 +1743,17 @@ class ScopeInfoData : public HeapObjectData {
ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
Handle<ScopeInfo> object);
- int context_length() const { return context_length_; }
- bool has_outer_scope_info() const { return has_outer_scope_info_; }
- int flags() const { return flags_; }
+ int ContextLength() const { return context_length_; }
+ bool HasContextExtensionSlot() const { return has_context_extension_slot_; }
+ bool HasOuterScopeInfo() const { return has_outer_scope_info_; }
- ObjectData* outer_scope_info() const { return outer_scope_info_; }
+ ObjectData* OuterScopeInfo() const { return outer_scope_info_; }
void SerializeScopeInfoChain(JSHeapBroker* broker);
private:
int const context_length_;
+ bool const has_context_extension_slot_;
bool const has_outer_scope_info_;
- int const flags_;
// Only serialized via SerializeScopeInfoChain.
ObjectData* outer_scope_info_;
@@ -1717,9 +1763,11 @@ ScopeInfoData::ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
Handle<ScopeInfo> object)
: HeapObjectData(broker, storage, object),
context_length_(object->ContextLength()),
+ has_context_extension_slot_(object->HasContextExtensionSlot()),
has_outer_scope_info_(object->HasOuterScopeInfo()),
- flags_(object->Flags()),
- outer_scope_info_(nullptr) {}
+ outer_scope_info_(nullptr) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+}
void ScopeInfoData::SerializeScopeInfoChain(JSHeapBroker* broker) {
if (outer_scope_info_) return;
@@ -1739,6 +1787,9 @@ class SharedFunctionInfoData : public HeapObjectData {
int builtin_id() const { return builtin_id_; }
int context_header_size() const { return context_header_size_; }
ObjectData* GetBytecodeArray() const { return GetBytecodeArray_; }
+ SharedFunctionInfo::Inlineability GetInlineability() const {
+ return inlineability_;
+ }
void SerializeFunctionTemplateInfo(JSHeapBroker* broker);
ObjectData* scope_info() const { return scope_info_; }
void SerializeScopeInfoChain(JSHeapBroker* broker);
@@ -1762,11 +1813,12 @@ class SharedFunctionInfoData : public HeapObjectData {
private:
int const builtin_id_;
- int context_header_size_;
+ int const context_header_size_;
ObjectData* const GetBytecodeArray_;
#define DECL_MEMBER(type, name) type const name##_;
BROKER_SFI_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
+ SharedFunctionInfo::Inlineability const inlineability_;
ObjectData* function_template_info_;
ZoneMap<int, ObjectData*> template_objects_;
ObjectData* scope_info_;
@@ -1787,6 +1839,7 @@ SharedFunctionInfoData::SharedFunctionInfoData(
BROKER_SFI_FIELDS(INIT_MEMBER)
#undef INIT_MEMBER
,
+ inlineability_(object->GetInlineability()),
function_template_info_(nullptr),
template_objects_(broker->zone()),
scope_info_(nullptr) {
@@ -1798,7 +1851,7 @@ void SharedFunctionInfoData::SerializeFunctionTemplateInfo(
JSHeapBroker* broker) {
if (function_template_info_) return;
function_template_info_ = broker->GetOrCreateData(
- Handle<SharedFunctionInfo>::cast(object())->function_data());
+ Handle<SharedFunctionInfo>::cast(object())->function_data(kAcquireLoad));
}
void SharedFunctionInfoData::SerializeScopeInfoChain(JSHeapBroker* broker) {
@@ -2126,8 +2179,9 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
Handle<Map> map = Handle<Map>::cast(object());
if (instance_descriptors_ == nullptr) {
- instance_descriptors_ = broker->GetOrCreateData(map->instance_descriptors())
- ->AsDescriptorArray();
+ instance_descriptors_ =
+ broker->GetOrCreateData(map->instance_descriptors(kRelaxedLoad))
+ ->AsDescriptorArray();
}
ZoneMap<int, PropertyDescriptor>& contents =
@@ -2138,7 +2192,7 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
Isolate* const isolate = broker->isolate();
auto descriptors =
Handle<DescriptorArray>::cast(instance_descriptors_->object());
- CHECK_EQ(*descriptors, map->instance_descriptors());
+ CHECK_EQ(*descriptors, map->instance_descriptors(kRelaxedLoad));
PropertyDescriptor d;
d.key = broker->GetOrCreateData(descriptors->GetKey(descriptor_index));
@@ -2252,8 +2306,8 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
CHECK_EQ(inobject_fields_.size(), 0u);
// Check the in-object properties.
- Handle<DescriptorArray> descriptors(boilerplate->map().instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> descriptors(
+ boilerplate->map().instance_descriptors(kRelaxedLoad), isolate);
for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != kField) continue;
@@ -2388,7 +2442,7 @@ SourceTextModuleRef ContextRef::GetModule(SerializationPolicy policy) const {
JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
bool tracing_enabled, bool is_concurrent_inlining,
- bool is_native_context_independent)
+ CodeKind code_kind)
: isolate_(isolate),
zone_(broker_zone),
refs_(zone()->New<RefsMap>(kMinimalRefsBucketCount, AddressMatcher(),
@@ -2397,8 +2451,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
array_and_object_prototypes_(zone()),
tracing_enabled_(tracing_enabled),
is_concurrent_inlining_(is_concurrent_inlining),
- is_native_context_independent_(is_native_context_independent),
- local_heap_(base::nullopt),
+ code_kind_(code_kind),
feedback_(zone()),
bytecode_analyses_(zone()),
property_access_infos_(zone()),
@@ -2413,7 +2466,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
TRACE(this, "Constructing heap broker");
}
-JSHeapBroker::~JSHeapBroker() { DCHECK(!local_heap_); }
+JSHeapBroker::~JSHeapBroker() { DCHECK_NULL(local_isolate_); }
void JSHeapBroker::SetPersistentAndCopyCanonicalHandlesForTesting(
std::unique_ptr<PersistentHandles> persistent_handles,
@@ -2432,7 +2485,7 @@ void JSHeapBroker::CopyCanonicalHandlesForTesting(
for (auto it = it_scope.begin(); it != it_scope.end(); ++it) {
Address* entry = *it.entry();
Object key = it.key();
- canonical_handles_->Set(key, entry);
+ canonical_handles_->Insert(key, entry);
}
}
@@ -2443,20 +2496,24 @@ std::string JSHeapBroker::Trace() const {
return oss.str();
}
-void JSHeapBroker::InitializeLocalHeap(OptimizedCompilationInfo* info) {
- set_persistent_handles(info->DetachPersistentHandles());
+void JSHeapBroker::AttachLocalIsolate(OptimizedCompilationInfo* info,
+ LocalIsolate* local_isolate) {
set_canonical_handles(info->DetachCanonicalHandles());
- DCHECK(!local_heap_);
- local_heap_.emplace(isolate_->heap(), std::move(ph_));
+ DCHECK_NULL(local_isolate_);
+ local_isolate_ = local_isolate;
+ DCHECK_NOT_NULL(local_isolate_);
+ local_isolate_->heap()->AttachPersistentHandles(
+ info->DetachPersistentHandles());
}
-void JSHeapBroker::TearDownLocalHeap(OptimizedCompilationInfo* info) {
+void JSHeapBroker::DetachLocalIsolate(OptimizedCompilationInfo* info) {
DCHECK_NULL(ph_);
- DCHECK(local_heap_);
- ph_ = local_heap_->DetachPersistentHandles();
- local_heap_.reset();
+ DCHECK_NOT_NULL(local_isolate_);
+ std::unique_ptr<PersistentHandles> ph =
+ local_isolate_->heap()->DetachPersistentHandles();
+ local_isolate_ = nullptr;
info->set_canonical_handles(DetachCanonicalHandles());
- info->set_persistent_handles(DetachPersistentHandles());
+ info->set_persistent_handles(std::move(ph));
}
void JSHeapBroker::StopSerializing() {
@@ -3069,7 +3126,9 @@ PropertyDetails MapRef::GetPropertyDetails(
if (data_->should_access_heap()) {
AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
broker()->mode());
- return object()->instance_descriptors().GetDetails(descriptor_index);
+ return object()
+ ->instance_descriptors(kRelaxedLoad)
+ .GetDetails(descriptor_index);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
return descriptors->contents().at(descriptor_index.as_int()).details;
@@ -3081,10 +3140,10 @@ NameRef MapRef::GetPropertyKey(InternalIndex descriptor_index) const {
broker()->mode());
AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
broker()->mode());
- return NameRef(
- broker(),
- broker()->CanonicalPersistentHandle(
- object()->instance_descriptors().GetKey(descriptor_index)));
+ return NameRef(broker(), broker()->CanonicalPersistentHandle(
+ object()
+ ->instance_descriptors(kRelaxedLoad)
+ .GetKey(descriptor_index)));
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
return NameRef(broker(),
@@ -3124,9 +3183,10 @@ ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
broker()->mode());
AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
broker()->mode());
- Handle<FieldType> field_type(
- object()->instance_descriptors().GetFieldType(descriptor_index),
- broker()->isolate());
+ Handle<FieldType> field_type(object()
+ ->instance_descriptors(kRelaxedLoad)
+ .GetFieldType(descriptor_index),
+ broker()->isolate());
return ObjectRef(broker(), field_type);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
@@ -3163,10 +3223,7 @@ base::Optional<double> StringRef::ToNumber() {
broker()->mode());
AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
broker()->mode());
- AllowHeapAllocationIfNeeded allow_heap_allocation(data()->kind(),
- broker()->mode());
- int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
- return StringToDouble(broker()->isolate(), object(), flags);
+ return StringToDouble(object());
}
return data()->AsString()->to_number();
}
@@ -3302,8 +3359,17 @@ int BytecodeArrayRef::handler_table_size() const {
return BitField::decode(ObjectRef::data()->As##holder()->field()); \
}
-// Like IF_ACCESS_FROM_HEAP_C but we also allow direct heap access for
+// Like IF_ACCESS_FROM_HEAP[_C] but we also allow direct heap access for
// kSerialized only for methods that we identified to be safe.
+#define IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name) \
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) { \
+ AllowHandleAllocationIfNeeded handle_allocation( \
+ data_->kind(), broker()->mode(), FLAG_turbo_direct_heap_access); \
+ AllowHandleDereferenceIfNeeded allow_handle_dereference( \
+ data_->kind(), broker()->mode(), FLAG_turbo_direct_heap_access); \
+ return result##Ref(broker(), \
+ broker()->CanonicalPersistentHandle(object()->name())); \
+ }
#define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) { \
AllowHandleAllocationIfNeeded handle_allocation( \
@@ -3313,10 +3379,15 @@ int BytecodeArrayRef::handler_table_size() const {
return object()->name(); \
}
-// Like BIMODAL_ACCESSOR_C except that we force a direct heap access if
+// Like BIMODAL_ACCESSOR[_C] except that we force a direct heap access if
// FLAG_turbo_direct_heap_access is true (even for kSerialized). This is because
// we identified the method to be safe to use direct heap access, but the
// holder##Data class still needs to be serialized.
+#define BIMODAL_ACCESSOR_WITH_FLAG(holder, result, name) \
+ result##Ref holder##Ref::name() const { \
+ IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name); \
+ return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
+ }
#define BIMODAL_ACCESSOR_WITH_FLAG_C(holder, result, name) \
result holder##Ref::name() const { \
IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
@@ -3360,6 +3431,7 @@ BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
BIMODAL_ACCESSOR(JSFunction, Map, initial_map)
BIMODAL_ACCESSOR(JSFunction, Object, prototype)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
+BIMODAL_ACCESSOR(JSFunction, FeedbackCell, raw_feedback_cell)
BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
BIMODAL_ACCESSOR(JSFunction, Code, code)
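
The BIMODAL_ACCESSOR_WITH_FLAG[_C] macros defined above, and the accessor list they feed (including the new raw_feedback_cell entry), stamp out getters that either read the live heap object or fall back to the serialized copy. The sketch below shows the same X-macro idea on invented types; FIELD_LIST, Widget and WidgetRef are not V8 names, and the "serialization" is just an eager field copy in the constructor.

#include <cstdio>

// Hedged sketch of the X-macro style above on invented types: one list of
// (type, name) pairs stamps out the copied storage, the copy step, and the
// bimodal accessors.
#define FIELD_LIST(V) \
  V(int, length)      \
  V(bool, is_sealed)

struct Widget {
  int length = 3;
  bool is_sealed = true;
};

class WidgetRef {
 public:
  WidgetRef(const Widget* live, bool may_access_live)
      : live_(live), may_access_live_(may_access_live) {
// "Serialization": copy the listed fields once, up front.
#define COPY_FIELD(type, name) copied_##name##_ = live->name;
    FIELD_LIST(COPY_FIELD)
#undef COPY_FIELD
  }

// Each accessor picks between the live object and the copied field.
#define DEF_ACCESSOR(type, name)              \
  type name() const {                         \
    if (may_access_live_) return live_->name; \
    return copied_##name##_;                  \
  }
  FIELD_LIST(DEF_ACCESSOR)
#undef DEF_ACCESSOR

 private:
  const Widget* live_;
  bool may_access_live_;
#define DEF_FIELD(type, name) type copied_##name##_;
  FIELD_LIST(DEF_FIELD)
#undef DEF_FIELD
};

int main() {
  Widget w;
  WidgetRef ref(&w, /*may_access_live=*/false);
  std::printf("%d %d\n", ref.length(), ref.is_sealed());  // reads the copies
  return 0;
}
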
@@ -3411,8 +3483,8 @@ BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
base::Optional<CallHandlerInfoRef> FunctionTemplateInfoRef::call_code() const {
if (data_->should_access_heap()) {
- return CallHandlerInfoRef(
- broker(), broker()->CanonicalPersistentHandle(object()->call_code()));
+ return CallHandlerInfoRef(broker(), broker()->CanonicalPersistentHandle(
+ object()->call_code(kAcquireLoad)));
}
ObjectData* call_code = data()->AsFunctionTemplateInfo()->call_code();
if (!call_code) return base::nullopt;
@@ -3517,12 +3589,19 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
BIMODAL_ACCESSOR(CallHandlerInfo, Object, data)
+BIMODAL_ACCESSOR_C(ScopeInfo, int, ContextLength)
+BIMODAL_ACCESSOR_C(ScopeInfo, bool, HasContextExtensionSlot)
+BIMODAL_ACCESSOR_C(ScopeInfo, bool, HasOuterScopeInfo)
+BIMODAL_ACCESSOR(ScopeInfo, ScopeInfo, OuterScopeInfo)
+
BIMODAL_ACCESSOR_C(SharedFunctionInfo, int, builtin_id)
BIMODAL_ACCESSOR(SharedFunctionInfo, BytecodeArray, GetBytecodeArray)
#define DEF_SFI_ACCESSOR(type, name) \
- BIMODAL_ACCESSOR_C(SharedFunctionInfo, type, name)
+ BIMODAL_ACCESSOR_WITH_FLAG_C(SharedFunctionInfo, type, name)
BROKER_SFI_FIELDS(DEF_SFI_ACCESSOR)
#undef DEF_SFI_ACCESSOR
+BIMODAL_ACCESSOR_C(SharedFunctionInfo, SharedFunctionInfo::Inlineability,
+ GetInlineability)
BIMODAL_ACCESSOR_C(String, int, length)
@@ -3534,7 +3613,7 @@ base::Optional<ObjectRef> MapRef::GetStrongValue(
AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
broker()->mode());
MaybeObject value =
- object()->instance_descriptors().GetValue(descriptor_index);
+ object()->instance_descriptors(kRelaxedLoad).GetValue(descriptor_index);
HeapObject object;
if (value.GetHeapObjectIfStrong(&object)) {
return ObjectRef(broker(), broker()->CanonicalPersistentHandle((object)));
@@ -3616,37 +3695,6 @@ int MapRef::GetInObjectProperties() const {
return data()->AsMap()->in_object_properties();
}
-int ScopeInfoRef::ContextLength() const {
- IF_ACCESS_FROM_HEAP_C(ContextLength);
- return data()->AsScopeInfo()->context_length();
-}
-
-int ScopeInfoRef::Flags() const {
- IF_ACCESS_FROM_HEAP_C(Flags);
- return data()->AsScopeInfo()->flags();
-}
-
-bool ScopeInfoRef::HasContextExtension() const {
- return ScopeInfo::HasContextExtensionSlotBit::decode(Flags());
-}
-
-bool ScopeInfoRef::HasOuterScopeInfo() const {
- IF_ACCESS_FROM_HEAP_C(HasOuterScopeInfo);
- return data()->AsScopeInfo()->has_outer_scope_info();
-}
-
-ScopeInfoRef ScopeInfoRef::OuterScopeInfo() const {
- if (data_->should_access_heap()) {
- AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
- broker()->mode());
- return ScopeInfoRef(broker(), broker()->CanonicalPersistentHandle(
- object()->OuterScopeInfo()));
- }
- return ScopeInfoRef(broker(), data()->AsScopeInfo()->outer_scope_info());
-}
-
void ScopeInfoRef::SerializeScopeInfoChain() {
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
@@ -3810,19 +3858,10 @@ base::Optional<ObjectRef> ObjectRef::GetOwnConstantElement(
uint32_t index, SerializationPolicy policy) const {
if (!(IsJSObject() || IsString())) return base::nullopt;
if (data_->should_access_heap()) {
- // TODO(neis): Once the CHECK_NE below is eliminated, i.e. once we can
- // safely read from the background thread, the special branch for read-only
- // objects can be removed as well.
- if (data_->kind() == ObjectDataKind::kUnserializedReadOnlyHeapObject) {
- DCHECK(IsString());
- // TODO(mythria): For ReadOnly strings, currently we cannot access data
- // from heap without creating handles since we use LookupIterator. We
- // should have a custom implementation for read only strings that doesn't
- // create handles. Till then it is OK to disable this optimization since
- // this only impacts keyed accesses on read only strings.
- return base::nullopt;
- }
- CHECK_NE(data_->kind(), ObjectDataKind::kNeverSerializedHeapObject);
+ // TODO(solanes, neis, v8:7790, v8:11012): Re-enable this optimization for
+ // concurrent inlining when we have the infrastructure to safely do so.
+ if (broker()->is_concurrent_inlining() && IsString()) return base::nullopt;
+ CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
return GetOwnElementFromHeap(broker(), object(), index, true);
}
ObjectData* element = nullptr;
@@ -4264,8 +4303,8 @@ SharedFunctionInfoRef::function_template_info() const {
if (data_->should_access_heap()) {
if (object()->IsApiFunction()) {
return FunctionTemplateInfoRef(
- broker(),
- broker()->CanonicalPersistentHandle(object()->function_data()));
+ broker(), broker()->CanonicalPersistentHandle(
+ object()->function_data(kAcquireLoad)));
}
return base::nullopt;
}
@@ -4361,10 +4400,10 @@ bool JSTypedArrayRef::serialized() const {
return data()->AsJSTypedArray()->serialized();
}
-void JSBoundFunctionRef::Serialize() {
- if (data_->should_access_heap()) return;
+bool JSBoundFunctionRef::Serialize() {
+ if (data_->should_access_heap()) return true;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSBoundFunction()->Serialize(broker());
+ return data()->AsJSBoundFunction()->Serialize(broker());
}
void PropertyCellRef::Serialize() {
@@ -4466,7 +4505,6 @@ GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell,
GlobalAccessFeedback::GlobalAccessFeedback(FeedbackSlotKind slot_kind)
: ProcessedFeedback(kGlobalAccess, slot_kind),
- cell_or_context_(base::nullopt),
index_and_immutable_(0 /* doesn't matter */) {
DCHECK(IsGlobalICKind(slot_kind));
}
@@ -4599,11 +4637,11 @@ bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
MinimorphicLoadPropertyAccessFeedback::MinimorphicLoadPropertyAccessFeedback(
NameRef const& name, FeedbackSlotKind slot_kind, Handle<Object> handler,
- MaybeHandle<Map> maybe_map, bool has_migration_target_maps)
+ ZoneVector<Handle<Map>> const& maps, bool has_migration_target_maps)
: ProcessedFeedback(kMinimorphicPropertyAccess, slot_kind),
name_(name),
handler_(handler),
- maybe_map_(maybe_map),
+ maps_(maps),
has_migration_target_maps_(has_migration_target_maps) {
DCHECK(IsLoadICKind(slot_kind));
}
@@ -4645,14 +4683,15 @@ FeedbackSlotKind JSHeapBroker::GetFeedbackSlotKind(
ProcessedFeedback const& processed = GetFeedback(source);
return processed.slot_kind();
}
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
return nexus.kind();
}
bool JSHeapBroker::FeedbackIsInsufficient(FeedbackSource const& source) const {
- return is_concurrent_inlining_
- ? GetFeedback(source).IsInsufficient()
- : FeedbackNexus(source.vector, source.slot).IsUninitialized();
+ return is_concurrent_inlining_ ? GetFeedback(source).IsInsufficient()
+ : FeedbackNexus(source.vector, source.slot,
+ feedback_nexus_config())
+ .IsUninitialized();
}
namespace {
@@ -4678,9 +4717,11 @@ void FilterRelevantReceiverMaps(Isolate* isolate, MapHandles* maps) {
MaybeObjectHandle TryGetMinimorphicHandler(
std::vector<MapAndHandler> const& maps_and_handlers, FeedbackSlotKind kind,
- Handle<NativeContext> native_context) {
- if (!FLAG_dynamic_map_checks || !IsLoadICKind(kind))
+ Handle<NativeContext> native_context, bool is_turboprop) {
+ if (!is_turboprop || !FLAG_turboprop_dynamic_map_checks ||
+ !IsLoadICKind(kind)) {
return MaybeObjectHandle();
+ }
// Don't use dynamic map checks when loading properties from Array.prototype.
// Using dynamic map checks prevents constant folding and hence does not
@@ -4737,7 +4778,7 @@ const ProcessedFeedback& JSHeapBroker::NewInsufficientFeedback(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
base::Optional<NameRef> static_name) {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
FeedbackSlotKind kind = nexus.kind();
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(kind);
@@ -4751,15 +4792,13 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
base::Optional<NameRef> name =
static_name.has_value() ? static_name : GetNameFeedback(nexus);
MaybeObjectHandle handler = TryGetMinimorphicHandler(
- maps_and_handlers, kind, target_native_context().object());
+ maps_and_handlers, kind, target_native_context().object(),
+ is_turboprop());
if (!handler.is_null()) {
- MaybeHandle<Map> maybe_map;
- if (nexus.ic_state() == MONOMORPHIC) {
- DCHECK_EQ(maps.size(), 1);
- maybe_map = maps[0];
- }
return *zone()->New<MinimorphicLoadPropertyAccessFeedback>(
- *name, kind, handler.object(), maybe_map, HasMigrationTargets(maps));
+ *name, kind, handler.object(),
+ ZoneVector<Handle<Map>>(maps.begin(), maps.end(), zone()),
+ HasMigrationTargets(maps));
}
FilterRelevantReceiverMaps(isolate(), &maps);
@@ -4839,7 +4878,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForBinaryOperation(
FeedbackSource const& source) const {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
BinaryOperationHint hint = nexus.GetBinaryOperationFeedback();
DCHECK_NE(hint, BinaryOperationHint::kNone); // Not uninitialized.
@@ -4848,7 +4887,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForBinaryOperation(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCompareOperation(
FeedbackSource const& source) const {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
CompareOperationHint hint = nexus.GetCompareOperationFeedback();
DCHECK_NE(hint, CompareOperationHint::kNone); // Not uninitialized.
@@ -4857,7 +4896,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCompareOperation(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForForIn(
FeedbackSource const& source) const {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
ForInHint hint = nexus.GetForInFeedback();
DCHECK_NE(hint, ForInHint::kNone); // Not uninitialized.
@@ -4866,7 +4905,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForForIn(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf(
FeedbackSource const& source) {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
base::Optional<JSObjectRef> optional_constructor;
@@ -4882,7 +4921,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForArrayOrObjectLiteral(
FeedbackSource const& source) {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
HeapObject object;
@@ -4900,7 +4939,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForArrayOrObjectLiteral(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral(
FeedbackSource const& source) {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
HeapObject object;
@@ -4915,7 +4954,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForTemplateObject(
FeedbackSource const& source) {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
HeapObject object;
@@ -4929,7 +4968,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForTemplateObject(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
FeedbackSource const& source) {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
base::Optional<HeapObjectRef> target_ref;
@@ -5336,6 +5375,14 @@ BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis(
return *analysis;
}
+bool JSHeapBroker::StackHasOverflowed() const {
+ DCHECK_IMPLIES(local_isolate_ == nullptr,
+ ThreadId::Current() == isolate_->thread_id());
+ return (local_isolate_ != nullptr)
+ ? StackLimitCheck::HasOverflowed(local_isolate_)
+ : StackLimitCheck(isolate_).HasOverflowed();
+}
+
OffHeapBytecodeArray::OffHeapBytecodeArray(BytecodeArrayRef bytecode_array)
: array_(bytecode_array) {}
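The new JSHeapBroker::StackHasOverflowed() picks the appropriate stack-limit check depending on whether the broker runs on the main-thread Isolate or on a background LocalIsolate. A minimal sketch of a hypothetical caller (VisitTransitively and its bail-out policy are illustrative, not part of this patch):

// Recursive graph walk that gives up when the broker reports that the stack
// limit for the current (main or background) compiler thread has been hit.
bool VisitTransitively(JSHeapBroker* broker, Node* node) {
  if (broker->StackHasOverflowed()) return false;  // too deep, bail out
  for (Node* input : node->inputs()) {
    if (!VisitTransitively(broker, input)) return false;
  }
  return true;
}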
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index d2bfbace26..a9be949566 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -14,10 +14,12 @@
#include "src/compiler/processed-feedback.h"
#include "src/compiler/refs-map.h"
#include "src/compiler/serializer-hints.h"
+#include "src/execution/local-isolate.h"
#include "src/handles/handles.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/local-heap.h"
#include "src/interpreter/bytecode-array-accessor.h"
+#include "src/objects/code-kind.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/function-kind.h"
#include "src/objects/objects.h"
@@ -32,6 +34,7 @@ namespace compiler {
class BytecodeAnalysis;
class ObjectRef;
+
std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
#define TRACE_BROKER(broker, x) \
@@ -78,13 +81,13 @@ struct PropertyAccessTarget {
class V8_EXPORT_PRIVATE JSHeapBroker {
public:
JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled,
- bool is_concurrent_inlining, bool is_native_context_independent);
+ bool is_concurrent_inlining, CodeKind code_kind);
// For use only in tests, sets default values for some arguments. Avoids
// churn when new flags are added.
JSHeapBroker(Isolate* isolate, Zone* broker_zone)
: JSHeapBroker(isolate, broker_zone, FLAG_trace_heap_broker, false,
- false) {}
+ CodeKind::TURBOFAN) {}
~JSHeapBroker();
@@ -102,7 +105,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool tracing_enabled() const { return tracing_enabled_; }
bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
bool is_native_context_independent() const {
- return is_native_context_independent_;
+ return code_kind_ == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
}
bool generate_full_feedback_collection() const {
// NCI code currently collects full feedback.
@@ -110,19 +113,33 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
CollectFeedbackInGenericLowering());
return is_native_context_independent();
}
+ bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
+
+ NexusConfig feedback_nexus_config() const {
+ // TODO(mvstanton): when the broker gathers feedback on the background
+ // thread, this should return a local NexusConfig object which points
+ // to the associated LocalHeap.
+ return NexusConfig::FromMainThread(isolate());
+ }
enum BrokerMode { kDisabled, kSerializing, kSerialized, kRetired };
BrokerMode mode() const { return mode_; }
- // Initialize the local heap with the persistent and canonical handles
- // provided by {info}.
- void InitializeLocalHeap(OptimizedCompilationInfo* info);
- // Tear down the local heap and pass the persistent and canonical handles
- // provided back to {info}. {info} is responsible for disposing of them.
- void TearDownLocalHeap(OptimizedCompilationInfo* info);
+
void StopSerializing();
void Retire();
bool SerializingAllowed() const;
+ // Remember the local isolate and initialize its local heap with the
+ // persistent and canonical handles provided by {info}.
+ void AttachLocalIsolate(OptimizedCompilationInfo* info,
+ LocalIsolate* local_isolate);
+ // Forget about the local isolate and pass the persistent and canonical
+ // handles provided back to {info}. {info} is responsible for disposing of
+ // them.
+ void DetachLocalIsolate(OptimizedCompilationInfo* info);
+
+ bool StackHasOverflowed() const;
+
#ifdef DEBUG
void PrintRefsAnalysis() const;
#endif // DEBUG
@@ -225,9 +242,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool IsSerializedForCompilation(const SharedFunctionInfoRef& shared,
const FeedbackVectorRef& feedback) const;
- LocalHeap* local_heap() {
- return local_heap_.has_value() ? &(*local_heap_) : nullptr;
- }
+ LocalIsolate* local_isolate() const { return local_isolate_; }
// Return the corresponding canonical persistent handle for {object}. Create
// one if it does not exist.
@@ -248,13 +263,14 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
}
Object obj(address);
- Address** entry = canonical_handles_->Get(obj);
- if (*entry == nullptr) {
+ auto find_result = canonical_handles_->FindOrInsert(obj);
+ if (!find_result.already_exists) {
// Allocate new PersistentHandle if one wasn't created before.
- DCHECK(local_heap_);
- *entry = local_heap_->NewPersistentHandle(obj).location();
+ DCHECK_NOT_NULL(local_isolate());
+ *find_result.entry =
+ local_isolate()->heap()->NewPersistentHandle(obj).location();
}
- return Handle<T>(*entry);
+ return Handle<T>(*find_result.entry);
} else {
return Handle<T>(object, isolate());
}
@@ -357,9 +373,9 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
BrokerMode mode_ = kDisabled;
bool const tracing_enabled_;
bool const is_concurrent_inlining_;
- bool const is_native_context_independent_;
+ CodeKind const code_kind_;
std::unique_ptr<PersistentHandles> ph_;
- base::Optional<LocalHeap> local_heap_;
+ LocalIsolate* local_isolate_ = nullptr;
std::unique_ptr<CanonicalHandlesMap> canonical_handles_;
unsigned trace_indentation_ = 0;
PerIsolateCompilerCache* compiler_cache_ = nullptr;
@@ -451,17 +467,19 @@ class OffHeapBytecodeArray final : public interpreter::AbstractBytecodeArray {
// Scope that unparks the LocalHeap, if:
// a) We have a JSHeapBroker,
-// b) Said JSHeapBroker has a LocalHeap, and
-// c) Said LocalHeap has been parked.
+// b) Said JSHeapBroker has a LocalIsolate and thus a LocalHeap,
+// c) Said LocalHeap has been parked and
+// d) The given condition evaluates to true.
// Used, for example, when printing the graph with --trace-turbo with a
// previously parked LocalHeap.
class UnparkedScopeIfNeeded {
public:
- explicit UnparkedScopeIfNeeded(JSHeapBroker* broker) {
- if (broker != nullptr) {
- LocalHeap* local_heap = broker->local_heap();
- if (local_heap != nullptr && local_heap->IsParked()) {
- unparked_scope.emplace(local_heap);
+ explicit UnparkedScopeIfNeeded(JSHeapBroker* broker,
+ bool extra_condition = true) {
+ if (broker != nullptr && extra_condition) {
+ LocalIsolate* local_isolate = broker->local_isolate();
+ if (local_isolate != nullptr && local_isolate->heap()->IsParked()) {
+ unparked_scope.emplace(local_isolate->heap());
}
}
}
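The added {extra_condition} parameter lets call sites skip unparking entirely when the guarded work is itself conditional. A minimal usage sketch (the surrounding function is hypothetical; only the UnparkedScopeIfNeeded constructor comes from this patch):

// Unpark the LocalHeap only if we will actually read heap objects, e.g. for
// --trace-turbo style graph printing.
void MaybePrintGraph(JSHeapBroker* broker, bool tracing_requested) {
  UnparkedScopeIfNeeded scope(broker, tracing_requested);
  if (!tracing_requested) return;
  // ... graph printing that may dereference heap objects goes here ...
}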
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index 837369ec55..4ad4181b59 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -172,10 +172,12 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
break;
}
case IrOpcode::kJSLoadNamedFromSuper: {
- // TODO(marja, v8:9237): Process feedback once it's added to the byte
- // code.
NamedAccess const& p = NamedAccessOf(node->op());
NameRef name(broker(), p.name());
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForPropertyAccess(p.feedback(),
+ AccessMode::kLoad, name);
+ }
break;
}
case IrOpcode::kJSStoreNamed: {
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 31f8298757..33846d1ac3 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -77,7 +77,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
out.node = node;
HeapObjectMatcher m(callee);
- if (m.HasValue() && m.Ref(broker()).IsJSFunction()) {
+ if (m.HasResolvedValue() && m.Ref(broker()).IsJSFunction()) {
out.functions[0] = m.Ref(broker()).AsJSFunction();
JSFunctionRef function = out.functions[0].value();
if (CanConsiderForInlining(broker(), function)) {
@@ -94,7 +94,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
}
for (int n = 0; n < value_input_count; ++n) {
HeapObjectMatcher m(callee->InputAt(n));
- if (!m.HasValue() || !m.Ref(broker()).IsJSFunction()) {
+ if (!m.HasResolvedValue() || !m.Ref(broker()).IsJSFunction()) {
out.num_functions = 0;
return out;
}
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 74e9d2c012..30f0a01d52 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -287,7 +287,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
// calls whenever the target is a constant function object, as follows:
// - JSCall(target:constant, receiver, args..., vector)
// - JSConstruct(target:constant, new.target, args..., vector)
- if (match.HasValue() && match.Ref(broker()).IsJSFunction()) {
+ if (match.HasResolvedValue() && match.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// The function might have not been called yet.
@@ -332,20 +332,20 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
// following static information is provided:
// - context : The context (as SSA value) bound by the call target.
// - feedback_vector : The target is guaranteed to use this feedback vector.
-FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
- Node** context_out) {
+FeedbackCellRef JSInliner::DetermineCallContext(Node* node,
+ Node** context_out) {
DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
Node* target = node->InputAt(JSCallOrConstructNode::TargetIndex());
HeapObjectMatcher match(target);
- if (match.HasValue() && match.Ref(broker()).IsJSFunction()) {
+ if (match.HasResolvedValue() && match.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// This was already ensured by DetermineCallTarget
CHECK(function.has_feedback_vector());
// The inlinee specializes to the context from the JSFunction object.
*context_out = jsgraph()->Constant(function.context());
- return function.feedback_vector();
+ return function.raw_feedback_cell();
}
if (match.IsJSCreateClosure()) {
@@ -356,7 +356,7 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
// The inlinee uses the locally provided context at instantiation.
*context_out = NodeProperties::GetContextInput(match.node());
- return cell.value().AsFeedbackVector();
+ return cell;
} else if (match.IsCheckClosure()) {
FeedbackCellRef cell(broker(), FeedbackCellOf(match.op()));
@@ -367,7 +367,7 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
match.node(), effect, control);
NodeProperties::ReplaceEffectInput(node, effect);
- return cell.value().AsFeedbackVector();
+ return cell;
}
// Must succeed.
@@ -438,8 +438,9 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
: ""));
// Determine the target's feedback vector and its context.
Node* context;
- FeedbackVectorRef feedback_vector = DetermineCallContext(node, &context);
- CHECK(broker()->IsSerializedForCompilation(*shared_info, feedback_vector));
+ FeedbackCellRef feedback_cell = DetermineCallContext(node, &context);
+ CHECK(broker()->IsSerializedForCompilation(
+ *shared_info, feedback_cell.value().AsFeedbackVector()));
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function.
@@ -468,7 +469,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
}
{
CallFrequency frequency = call.frequency();
- BuildGraphFromBytecode(broker(), zone(), *shared_info, feedback_vector,
+ BuildGraphFromBytecode(broker(), zone(), *shared_info, feedback_cell,
BailoutId::None(), jsgraph(), frequency,
source_positions_, inlining_id, info_->code_kind(),
flags, &info_->tick_counter());
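DetermineCallContext now returns the FeedbackCellRef; call sites that still need the vector derive it from the cell, as the CHECK above does. A condensed sketch of that relationship (error handling and surrounding control flow omitted):

// The feedback vector, when required, is obtained from the cell's value.
Node* context = nullptr;
FeedbackCellRef feedback_cell = DetermineCallContext(node, &context);
FeedbackVectorRef feedback_vector = feedback_cell.value().AsFeedbackVector();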
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index f60d53dbc9..0648c86f62 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -59,7 +59,7 @@ class JSInliner final : public AdvancedReducer {
SourcePositionTable* const source_positions_;
base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node);
- FeedbackVectorRef DetermineCallContext(Node* node, Node** context_out);
+ FeedbackCellRef DetermineCallContext(Node* node, Node** context_out);
Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count, BailoutId bailout_id,
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 03ac064c4e..653f8ff114 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -323,7 +323,7 @@ Reduction JSIntrinsicLowering::ReduceToObject(Node* node) {
Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
// ToString is unnecessary if the input is a string.
HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
- if (m.HasValue() && m.Ref(broker()).IsString()) {
+ if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
ReplaceWithValue(node, m.node());
return Replace(m.node());
}
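The HasValue()/Value() to HasResolvedValue()/ResolvedValue() rename repeated across these reducers always follows the same before/after shape. A condensed sketch (the helper itself is hypothetical):

// was: if (m.HasValue()) { ... m.Value() ... }
bool TryGetFoldableNumber(NumberMatcher const& m, double* out) {
  if (!m.HasResolvedValue()) return false;  // constant not (yet) resolved
  *out = m.ResolvedValue();                 // the resolved constant value
  return true;
}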
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 2a4524f386..a0115df0db 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -102,6 +102,8 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return ReduceJSStoreGlobal(node);
case IrOpcode::kJSLoadNamed:
return ReduceJSLoadNamed(node);
+ case IrOpcode::kJSLoadNamedFromSuper:
+ return ReduceJSLoadNamedFromSuper(node);
case IrOpcode::kJSStoreNamed:
return ReduceJSStoreNamed(node);
case IrOpcode::kJSHasProperty:
@@ -136,13 +138,13 @@ base::Optional<size_t> JSNativeContextSpecialization::GetMaxStringLength(
}
HeapObjectMatcher matcher(node);
- if (matcher.HasValue() && matcher.Ref(broker).IsString()) {
+ if (matcher.HasResolvedValue() && matcher.Ref(broker).IsString()) {
StringRef input = matcher.Ref(broker).AsString();
return input.length();
}
NumberMatcher number_matcher(node);
- if (number_matcher.HasValue()) {
+ if (number_matcher.HasResolvedValue()) {
return kBase10MaximalLength + 1;
}
@@ -157,7 +159,7 @@ Reduction JSNativeContextSpecialization::ReduceJSToString(Node* node) {
Reduction reduction;
HeapObjectMatcher matcher(input);
- if (matcher.HasValue() && matcher.Ref(broker()).IsString()) {
+ if (matcher.HasResolvedValue() && matcher.Ref(broker()).IsString()) {
reduction = Changed(input); // JSToString(x:string) => x
ReplaceWithValue(node, reduction.replacement());
return reduction;
@@ -168,9 +170,9 @@ Reduction JSNativeContextSpecialization::ReduceJSToString(Node* node) {
// so an alternative approach should be designed if this causes performance
// regressions and the stronger optimization should be re-implemented.
NumberMatcher number_matcher(input);
- if (number_matcher.HasValue()) {
- const StringConstantBase* base =
- shared_zone()->New<NumberToStringConstant>(number_matcher.Value());
+ if (number_matcher.HasResolvedValue()) {
+ const StringConstantBase* base = shared_zone()->New<NumberToStringConstant>(
+ number_matcher.ResolvedValue());
reduction =
Replace(graph()->NewNode(common()->DelayedStringConstant(base)));
ReplaceWithValue(node, reduction.replacement());
@@ -186,11 +188,12 @@ JSNativeContextSpecialization::CreateDelayedStringConstant(Node* node) {
return StringConstantBaseOf(node->op());
} else {
NumberMatcher number_matcher(node);
- if (number_matcher.HasValue()) {
- return shared_zone()->New<NumberToStringConstant>(number_matcher.Value());
+ if (number_matcher.HasResolvedValue()) {
+ return shared_zone()->New<NumberToStringConstant>(
+ number_matcher.ResolvedValue());
} else {
HeapObjectMatcher matcher(node);
- if (matcher.HasValue() && matcher.Ref(broker()).IsString()) {
+ if (matcher.HasResolvedValue() && matcher.Ref(broker()).IsString()) {
StringRef s = matcher.Ref(broker()).AsString();
return shared_zone()->New<StringLiteral>(
s.object(), static_cast<size_t>(s.length()));
@@ -208,7 +211,7 @@ bool IsStringConstant(JSHeapBroker* broker, Node* node) {
}
HeapObjectMatcher matcher(node);
- return matcher.HasValue() && matcher.Ref(broker).IsString();
+ return matcher.HasResolvedValue() && matcher.Ref(broker).IsString();
}
} // namespace
@@ -352,20 +355,21 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
// Check if the input is a known JSFunction.
HeapObjectMatcher m(constructor);
- if (!m.HasValue()) return NoChange();
+ if (!m.HasResolvedValue() || !m.Ref(broker()).IsJSFunction()) {
+ return NoChange();
+ }
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
MapRef function_map = function.map();
if (should_disallow_heap_access() && !function_map.serialized_prototype()) {
TRACE_BROKER_MISSING(broker(), "data for map " << function_map);
return NoChange();
}
- ObjectRef function_prototype = function_map.prototype();
+ HeapObjectRef function_prototype = function_map.prototype();
// We can constant-fold the super constructor access if the
// {function}s map is stable, i.e. we can use a code dependency
// to guard against [[Prototype]] changes of {function}.
- if (function_map.is_stable() && function_prototype.IsHeapObject() &&
- function_prototype.AsHeapObject().map().is_constructor()) {
+ if (function_map.is_stable()) {
dependencies()->DependOnStableMap(function_map);
Node* value = jsgraph()->Constant(function_prototype);
ReplaceWithValue(node, value);
@@ -389,7 +393,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// we have feedback from the InstanceOfIC.
Handle<JSObject> receiver;
HeapObjectMatcher m(constructor);
- if (m.HasValue() && m.Ref(broker()).IsJSObject()) {
+ if (m.HasResolvedValue() && m.Ref(broker()).IsJSObject()) {
receiver = m.Ref(broker()).AsJSObject().object();
} else if (p.feedback().IsValid()) {
ProcessedFeedback const& feedback =
@@ -430,12 +434,12 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// takes over, but that requires the constructor to be callable.
if (!receiver_map.is_callable()) return NoChange();
- dependencies()->DependOnStablePrototypeChains(access_info.receiver_maps(),
- kStartAtPrototype);
+ dependencies()->DependOnStablePrototypeChains(
+ access_info.lookup_start_object_maps(), kStartAtPrototype);
// Monomorphic property access.
access_builder.BuildCheckMaps(constructor, &effect, control,
- access_info.receiver_maps());
+ access_info.lookup_start_object_maps());
// Lower to OrdinaryHasInstance(C, O).
NodeProperties::ReplaceValueInput(node, constructor, 0);
@@ -460,7 +464,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
if (found_on_proto) {
dependencies()->DependOnStablePrototypeChains(
- access_info.receiver_maps(), kStartAtPrototype,
+ access_info.lookup_start_object_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
}
@@ -470,7 +474,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Monomorphic property access.
access_builder.BuildCheckMaps(constructor, &effect, control,
- access_info.receiver_maps());
+ access_info.lookup_start_object_maps());
// Create a nested frame state inside the current method's most-recent frame
// state that will ensure that deopts that happen after this point will not
@@ -519,10 +523,9 @@ JSNativeContextSpecialization::InferHasInPrototypeChainResult
JSNativeContextSpecialization::InferHasInPrototypeChain(
Node* receiver, Node* effect, HeapObjectRef const& prototype) {
ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain;
+ NodeProperties::InferMapsResult result = NodeProperties::InferMapsUnsafe(
+ broker(), receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoMaps) return kMayBeInPrototypeChain;
// Try to determine either that all of the {receiver_maps} have the given
// {prototype} in their chain, or that none do. If we can't tell, return
@@ -531,7 +534,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
bool none = true;
for (size_t i = 0; i < receiver_maps.size(); ++i) {
MapRef map(broker(), receiver_maps[i]);
- if (result == NodeProperties::kUnreliableReceiverMaps && !map.is_stable()) {
+ if (result == NodeProperties::kUnreliableMaps && !map.is_stable()) {
return kMayBeInPrototypeChain;
}
while (true) {
@@ -573,7 +576,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
if (!prototype.map().is_stable()) return kMayBeInPrototypeChain;
last_prototype = prototype.AsJSObject();
}
- WhereToStart start = result == NodeProperties::kUnreliableReceiverMaps
+ WhereToStart start = result == NodeProperties::kUnreliableMaps
? kStartAtReceiver
: kStartAtPrototype;
dependencies()->DependOnStablePrototypeChains(receiver_maps, start,
@@ -594,7 +597,7 @@ Reduction JSNativeContextSpecialization::ReduceJSHasInPrototypeChain(
// Check if we can constant-fold the prototype chain walk
// for the given {value} and the {prototype}.
HeapObjectMatcher m(prototype);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
InferHasInPrototypeChainResult result =
InferHasInPrototypeChain(value, effect, m.Ref(broker()));
if (result != kMayBeInPrototypeChain) {
@@ -615,7 +618,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// Check if the {constructor} is known at compile time.
HeapObjectMatcher m(constructor);
- if (!m.HasValue()) return NoChange();
+ if (!m.HasResolvedValue()) return NoChange();
if (m.Ref(broker()).IsJSBoundFunction()) {
// OrdinaryHasInstance on bound functions turns into a recursive invocation
@@ -681,7 +684,7 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
// Check if the {constructor} is the %Promise% function.
HeapObjectMatcher m(constructor);
- if (!m.HasValue() ||
+ if (!m.HasResolvedValue() ||
!m.Ref(broker()).equals(native_context().promise_function())) {
return NoChange();
}
@@ -747,8 +750,8 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
return inference.NoChange();
}
- dependencies()->DependOnStablePrototypeChains(access_info.receiver_maps(),
- kStartAtPrototype);
+ dependencies()->DependOnStablePrototypeChains(
+ access_info.lookup_start_object_maps(), kStartAtPrototype);
// Simply fulfill the {promise} with the {resolution}.
Node* value = effect =
@@ -779,23 +782,30 @@ FieldAccess ForPropertyCellValue(MachineRepresentation representation,
} // namespace
Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
- Node* node, Node* receiver, Node* value, NameRef const& name,
- AccessMode access_mode, Node* key) {
+ Node* node, Node* lookup_start_object, Node* receiver, Node* value,
+ NameRef const& name, AccessMode access_mode, Node* key, Node* effect) {
base::Optional<PropertyCellRef> cell =
native_context().global_object().GetPropertyCell(name);
- return cell.has_value() ? ReduceGlobalAccess(node, receiver, value, name,
- access_mode, key, *cell)
- : NoChange();
+ return cell.has_value()
+ ? ReduceGlobalAccess(node, lookup_start_object, receiver, value,
+ name, access_mode, key, *cell, effect)
+ : NoChange();
}
// TODO(neis): Try to merge this with ReduceNamedAccess by introducing a new
// PropertyAccessInfo kind for global accesses and using the existing mechanism
// for building loads/stores.
+// Note: The "receiver" parameter is only used for DCHECKS, but that's on
+// purpose. This way we can assert the super property access cases won't hit the
+// code which hasn't been modified to support super property access.
Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
- Node* node, Node* receiver, Node* value, NameRef const& name,
- AccessMode access_mode, Node* key, PropertyCellRef const& property_cell) {
- Node* effect = NodeProperties::GetEffectInput(node);
+ Node* node, Node* lookup_start_object, Node* receiver, Node* value,
+ NameRef const& name, AccessMode access_mode, Node* key,
+ PropertyCellRef const& property_cell, Node* effect) {
Node* control = NodeProperties::GetControlInput(node);
+ if (effect == nullptr) {
+ effect = NodeProperties::GetEffectInput(node);
+ }
ObjectRef property_cell_value = property_cell.value();
if (property_cell_value.IsHeapObject() &&
@@ -811,6 +821,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// We have additional constraints for stores.
if (access_mode == AccessMode::kStore) {
+ DCHECK_EQ(receiver, lookup_start_object);
if (property_details.IsReadOnly()) {
// Don't even bother trying to lower stores to read-only data properties.
return NoChange();
@@ -826,6 +837,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
}
}
} else if (access_mode == AccessMode::kHas) {
+ DCHECK_EQ(receiver, lookup_start_object);
// has checks cannot follow the fast-path used by loads when these
// conditions hold.
if ((property_details.IsConfigurable() || !property_details.IsReadOnly()) &&
@@ -839,16 +851,16 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
effect = BuildCheckEqualsName(name, key, effect, control);
}
- // If we have a {receiver} to validate, we do so by checking that its map is
- // the (target) global proxy's map. This guarantees that in fact the receiver
- // is the global proxy.
- if (receiver != nullptr) {
+ // If we have a {lookup_start_object} to validate, we do so by checking that
+ // its map is the (target) global proxy's map. This guarantees that in fact
+ // the lookup start object is the global proxy.
+ if (lookup_start_object != nullptr) {
effect = graph()->NewNode(
simplified()->CheckMaps(
CheckMapsFlag::kNone,
ZoneHandleSet<Map>(
HeapObjectRef(broker(), global_proxy()).map().object())),
- receiver, effect, control);
+ lookup_start_object, effect, control);
}
if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) {
@@ -914,6 +926,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
}
} else {
DCHECK_EQ(AccessMode::kStore, access_mode);
+ DCHECK_EQ(receiver, lookup_start_object);
DCHECK(!property_details.IsReadOnly());
switch (property_details.cell_type()) {
case PropertyCellType::kUndefined: {
@@ -1010,7 +1023,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
ReplaceWithValue(node, value, effect);
return Replace(value);
} else if (feedback.IsPropertyCell()) {
- return ReduceGlobalAccess(node, nullptr, nullptr,
+ return ReduceGlobalAccess(node, nullptr, nullptr, nullptr,
NameRef(broker(), p.name()), AccessMode::kLoad,
nullptr, feedback.property_cell());
} else {
@@ -1041,9 +1054,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
ReplaceWithValue(node, value, effect, control);
return Replace(value);
} else if (feedback.IsPropertyCell()) {
- return ReduceGlobalAccess(node, nullptr, value, NameRef(broker(), p.name()),
- AccessMode::kStore, nullptr,
- feedback.property_cell());
+ return ReduceGlobalAccess(node, nullptr, nullptr, value,
+ NameRef(broker(), p.name()), AccessMode::kStore,
+ nullptr, feedback.property_cell());
} else {
DCHECK(feedback.IsMegamorphic());
return NoChange();
@@ -1054,10 +1067,26 @@ Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess(
Node* node, Node* value,
MinimorphicLoadPropertyAccessFeedback const& feedback,
FeedbackSource const& source) {
- Node* receiver = NodeProperties::GetValueInput(node, 0);
+ DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
+ node->opcode() == IrOpcode::kJSLoadProperty ||
+ node->opcode() == IrOpcode::kJSLoadNamedFromSuper);
+ STATIC_ASSERT(JSLoadNamedNode::ObjectIndex() == 0 &&
+ JSLoadPropertyNode::ObjectIndex() == 0);
+
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ Node* lookup_start_object;
+ if (node->opcode() == IrOpcode::kJSLoadNamedFromSuper) {
+ DCHECK(FLAG_super_ic);
+ JSLoadNamedFromSuperNode n(node);
+ // Lookup start object is the __proto__ of the home object.
+ lookup_start_object = effect =
+ BuildLoadPrototypeFromObject(n.home_object(), effect, control);
+ } else {
+ lookup_start_object = NodeProperties::GetValueInput(node, 0);
+ }
+
MinimorphicLoadPropertyAccessInfo access_info =
broker()->GetPropertyAccessInfo(
feedback, source,
@@ -1066,17 +1095,32 @@ Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess(
: SerializationPolicy::kSerializeIfNeeded);
if (access_info.IsInvalid()) return NoChange();
+ // The dynamic map check operator loads the feedback vector from the
+ // function's frame, so we can only use this for non-inlined functions.
+ // TODO(rmcilroy): Add support for using a trampoline like LoadICTrampoline
+ // and otherwise pass feedback vector explicitly if we need support for
+ // inlined functions.
+ // TODO(rmcilroy): Ideally we would check whether we have an inlined frame
+ // state here, but there isn't a good way to distinguish inlined from OSR
+ // frame states.
+ DCHECK(broker()->is_turboprop());
+
PropertyAccessBuilder access_builder(jsgraph(), broker(), nullptr);
CheckMapsFlags flags = CheckMapsFlag::kNone;
if (feedback.has_migration_target_maps()) {
flags |= CheckMapsFlag::kTryMigrateInstance;
}
- effect =
- graph()->NewNode(simplified()->DynamicCheckMaps(flags, feedback.handler(),
- feedback.map(), source),
- receiver, effect, control);
+
+ ZoneHandleSet<Map> maps;
+ for (Handle<Map> map : feedback.maps()) {
+ maps.insert(map, graph()->zone());
+ }
+
+ effect = graph()->NewNode(
+ simplified()->DynamicCheckMaps(flags, feedback.handler(), maps, source),
+ lookup_start_object, effect, control);
value = access_builder.BuildMinimorphicLoadDataField(
- feedback.name(), access_info, receiver, &effect, &control);
+ feedback.name(), access_info, lookup_start_object, &effect, &control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -1091,7 +1135,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
node->opcode() == IrOpcode::kJSStoreProperty ||
node->opcode() == IrOpcode::kJSStoreNamedOwn ||
node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
- node->opcode() == IrOpcode::kJSHasProperty);
+ node->opcode() == IrOpcode::kJSHasProperty ||
+ node->opcode() == IrOpcode::kJSLoadNamedFromSuper);
STATIC_ASSERT(JSLoadNamedNode::ObjectIndex() == 0 &&
JSStoreNamedNode::ObjectIndex() == 0 &&
JSLoadPropertyNode::ObjectIndex() == 0 &&
@@ -1100,36 +1145,51 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
JSStoreNamedNode::ObjectIndex() == 0 &&
JSStoreDataPropertyInLiteralNode::ObjectIndex() == 0 &&
JSHasPropertyNode::ObjectIndex() == 0);
- Node* receiver = NodeProperties::GetValueInput(node, 0);
+ STATIC_ASSERT(JSLoadNamedFromSuperNode::ReceiverIndex() == 0);
+
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ // receiver = the object we pass to the accessor (if any) as the "this" value.
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ // lookup_start_object = the object where we start looking for the property.
+ Node* lookup_start_object;
+ if (node->opcode() == IrOpcode::kJSLoadNamedFromSuper) {
+ DCHECK(FLAG_super_ic);
+ JSLoadNamedFromSuperNode n(node);
+ // Lookup start object is the __proto__ of the home object.
+ lookup_start_object = effect =
+ BuildLoadPrototypeFromObject(n.home_object(), effect, control);
+ } else {
+ lookup_start_object = receiver;
+ }
+
// Either infer maps from the graph or use the feedback.
- ZoneVector<Handle<Map>> receiver_maps(zone());
- if (!InferReceiverMaps(receiver, effect, &receiver_maps)) {
- receiver_maps = feedback.maps();
+ ZoneVector<Handle<Map>> lookup_start_object_maps(zone());
+ if (!InferMaps(lookup_start_object, effect, &lookup_start_object_maps)) {
+ lookup_start_object_maps = feedback.maps();
}
- RemoveImpossibleReceiverMaps(receiver, &receiver_maps);
+ RemoveImpossibleMaps(lookup_start_object, &lookup_start_object_maps);
// Check if we have an access o.x or o.x=v where o is the target native
// contexts' global proxy, and turn that into a direct access to the
// corresponding global object instead.
- if (receiver_maps.size() == 1) {
- MapRef receiver_map(broker(), receiver_maps[0]);
- if (receiver_map.equals(
+ if (lookup_start_object_maps.size() == 1) {
+ MapRef lookup_start_object_map(broker(), lookup_start_object_maps[0]);
+ if (lookup_start_object_map.equals(
broker()->target_native_context().global_proxy_object().map()) &&
!broker()->target_native_context().global_object().IsDetached()) {
- return ReduceGlobalAccess(node, receiver, value, feedback.name(),
- access_mode, key);
+ return ReduceGlobalAccess(node, lookup_start_object, receiver, value,
+ feedback.name(), access_mode, key, effect);
}
}
ZoneVector<PropertyAccessInfo> access_infos(zone());
{
ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone());
- for (Handle<Map> map_handle : receiver_maps) {
+ for (Handle<Map> map_handle : lookup_start_object_maps) {
MapRef map(broker(), map_handle);
if (map.is_deprecated()) continue;
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
@@ -1166,15 +1226,26 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Check for the monomorphic cases.
if (access_infos.size() == 1) {
PropertyAccessInfo access_info = access_infos.front();
- // Try to build string check or number check if possible.
- // Otherwise build a map check.
- if (!access_builder.TryBuildStringCheck(broker(),
- access_info.receiver_maps(),
- &receiver, &effect, control) &&
- !access_builder.TryBuildNumberCheck(broker(),
- access_info.receiver_maps(),
- &receiver, &effect, control)) {
- if (HasNumberMaps(broker(), access_info.receiver_maps())) {
+ if (receiver != lookup_start_object) {
+ // Super property access. lookup_start_object is a JSReceiver or
+ // null. It can't be a number, a string, etc., so trying to build the
+ // checks in the "else if" branch doesn't make sense.
+ access_builder.BuildCheckMaps(lookup_start_object, &effect, control,
+ access_info.lookup_start_object_maps());
+
+ } else if (!access_builder.TryBuildStringCheck(
+ broker(), access_info.lookup_start_object_maps(), &receiver,
+ &effect, control) &&
+ !access_builder.TryBuildNumberCheck(
+ broker(), access_info.lookup_start_object_maps(), &receiver,
+ &effect, control)) {
+ // Try to build a string check or a number check if possible. Otherwise
+ // build a map check.
+
+ // TryBuildStringCheck and TryBuildNumberCheck don't update the receiver
+ // if they fail.
+ DCHECK_EQ(receiver, lookup_start_object);
+ if (HasNumberMaps(broker(), access_info.lookup_start_object_maps())) {
// We need to also let Smi {receiver}s through in this case, so
// we construct a diamond, guarded by the Sminess of the {receiver}
// and if {receiver} is not a Smi just emit a sequence of map checks.
@@ -1188,7 +1259,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* efalse = effect;
{
access_builder.BuildCheckMaps(receiver, &efalse, if_false,
- access_info.receiver_maps());
+ access_info.lookup_start_object_maps());
}
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
@@ -1196,14 +1267,19 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
} else {
access_builder.BuildCheckMaps(receiver, &effect, control,
- access_info.receiver_maps());
+ access_info.lookup_start_object_maps());
}
+ } else {
+ // At least one of TryBuildStringCheck & TryBuildNumberCheck succeeded
+ // and updated the receiver. Update lookup_start_object to match (they
+ // should be the same).
+ lookup_start_object = receiver;
}
// Generate the actual property access.
ValueEffectControl continuation = BuildPropertyAccess(
- receiver, value, context, frame_state, effect, control, feedback.name(),
- if_exceptions, access_info, access_mode);
+ lookup_start_object, receiver, value, context, frame_state, effect,
+ control, feedback.name(), if_exceptions, access_info, access_mode);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -1214,24 +1290,27 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
ZoneVector<Node*> effects(zone());
ZoneVector<Node*> controls(zone());
- // Check if {receiver} may be a number.
- bool receiverissmi_possible = false;
- for (PropertyAccessInfo const& access_info : access_infos) {
- if (HasNumberMaps(broker(), access_info.receiver_maps())) {
- receiverissmi_possible = true;
- break;
- }
- }
-
- // Handle the case that {receiver} may be a number.
Node* receiverissmi_control = nullptr;
Node* receiverissmi_effect = effect;
- if (receiverissmi_possible) {
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
- Node* branch = graph()->NewNode(common()->Branch(), check, control);
- control = graph()->NewNode(common()->IfFalse(), branch);
- receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
- receiverissmi_effect = effect;
+
+ if (receiver == lookup_start_object) {
+ // Check if {receiver} may be a number.
+ bool receiverissmi_possible = false;
+ for (PropertyAccessInfo const& access_info : access_infos) {
+ if (HasNumberMaps(broker(), access_info.lookup_start_object_maps())) {
+ receiverissmi_possible = true;
+ break;
+ }
+ }
+
+ // Handle the case that {receiver} may be a number.
+ if (receiverissmi_possible) {
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+ control = graph()->NewNode(common()->IfFalse(), branch);
+ receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
+ receiverissmi_effect = effect;
+ }
}
// Generate code for the various different property access patterns.
@@ -1239,24 +1318,25 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
for (size_t j = 0; j < access_infos.size(); ++j) {
PropertyAccessInfo const& access_info = access_infos[j];
Node* this_value = value;
+ Node* this_lookup_start_object = lookup_start_object;
Node* this_receiver = receiver;
Node* this_effect = effect;
Node* this_control = fallthrough_control;
- // Perform map check on {receiver}.
- ZoneVector<Handle<Map>> const& receiver_maps =
- access_info.receiver_maps();
+ // Perform map check on {lookup_start_object}.
+ ZoneVector<Handle<Map>> const& lookup_start_object_maps =
+ access_info.lookup_start_object_maps();
{
// Whether to insert a dedicated MapGuard node into the
// effect to be able to learn from the control flow.
bool insert_map_guard = true;
- // Check maps for the {receiver}s.
+ // Check maps for the {lookup_start_object}s.
if (j == access_infos.size() - 1) {
// Last map check on the fallthrough control path, do a
// conditional eager deoptimization exit here.
- access_builder.BuildCheckMaps(receiver, &this_effect, this_control,
- receiver_maps);
+ access_builder.BuildCheckMaps(lookup_start_object, &this_effect,
+ this_control, lookup_start_object_maps);
fallthrough_control = nullptr;
// Don't insert a MapGuard in this case, as the CheckMaps
@@ -1264,14 +1344,14 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// along the effect chain.
insert_map_guard = false;
} else {
- // Explicitly branch on the {receiver_maps}.
+ // Explicitly branch on the {lookup_start_object_maps}.
ZoneHandleSet<Map> maps;
- for (Handle<Map> map : receiver_maps) {
+ for (Handle<Map> map : lookup_start_object_maps) {
maps.insert(map, graph()->zone());
}
Node* check = this_effect =
- graph()->NewNode(simplified()->CompareMaps(maps), receiver,
- this_effect, this_control);
+ graph()->NewNode(simplified()->CompareMaps(maps),
+ lookup_start_object, this_effect, this_control);
Node* branch =
graph()->NewNode(common()->Branch(), check, this_control);
fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
@@ -1279,8 +1359,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
// The Number case requires special treatment to also deal with Smis.
- if (HasNumberMaps(broker(), receiver_maps)) {
+ if (HasNumberMaps(broker(), lookup_start_object_maps)) {
// Join this check with the "receiver is smi" check above.
+ DCHECK_EQ(receiver, lookup_start_object);
DCHECK_NOT_NULL(receiverissmi_effect);
DCHECK_NOT_NULL(receiverissmi_control);
this_control = graph()->NewNode(common()->Merge(2), this_control,
@@ -1289,7 +1370,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
receiverissmi_effect, this_control);
receiverissmi_effect = receiverissmi_control = nullptr;
- // The {receiver} can also be a Smi in this case, so
+ // The {lookup_start_object} can also be a Smi in this case, so
// a MapGuard doesn't make sense for this at all.
insert_map_guard = false;
}
@@ -1297,29 +1378,32 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Introduce a MapGuard to learn from this on the effect chain.
if (insert_map_guard) {
ZoneHandleSet<Map> maps;
- for (auto receiver_map : receiver_maps) {
- maps.insert(receiver_map, graph()->zone());
+ for (auto lookup_start_object_map : lookup_start_object_maps) {
+ maps.insert(lookup_start_object_map, graph()->zone());
}
- this_effect = graph()->NewNode(simplified()->MapGuard(maps), receiver,
- this_effect, this_control);
+ this_effect =
+ graph()->NewNode(simplified()->MapGuard(maps),
+ lookup_start_object, this_effect, this_control);
}
- // If all {receiver_maps} are Strings we also need to rename the
- // {receiver} here to make sure that TurboFan knows that along this
- // path the {this_receiver} is a String. This is because we want
- // strict checking of types, for example for StringLength operators.
- if (HasOnlyStringMaps(broker(), receiver_maps)) {
- this_receiver = this_effect =
- graph()->NewNode(common()->TypeGuard(Type::String()), receiver,
- this_effect, this_control);
+ // If all {lookup_start_object_maps} are Strings we also need to rename
+ // the {lookup_start_object} here to make sure that TurboFan knows that
+ // along this path the {this_lookup_start_object} is a String. This is
+ // because we want strict checking of types, for example for
+ // StringLength operators.
+ if (HasOnlyStringMaps(broker(), lookup_start_object_maps)) {
+ DCHECK_EQ(receiver, lookup_start_object);
+ this_lookup_start_object = this_receiver = this_effect =
+ graph()->NewNode(common()->TypeGuard(Type::String()),
+ lookup_start_object, this_effect, this_control);
}
}
// Generate the actual property access.
- ValueEffectControl continuation =
- BuildPropertyAccess(this_receiver, this_value, context, frame_state,
- this_effect, this_control, feedback.name(),
- if_exceptions, access_info, access_mode);
+ ValueEffectControl continuation = BuildPropertyAccess(
+ this_lookup_start_object, this_receiver, this_value, context,
+ frame_state, this_effect, this_control, feedback.name(),
+ if_exceptions, access_info, access_mode);
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
@@ -1377,7 +1461,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
// Check if we have a constant receiver.
HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
ObjectRef object = m.Ref(broker());
if (object.IsJSFunction() &&
name.equals(ObjectRef(broker(), factory()->prototype_string()))) {
@@ -1411,6 +1495,17 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
FeedbackSource(p.feedback()), AccessMode::kLoad);
}
+Reduction JSNativeContextSpecialization::ReduceJSLoadNamedFromSuper(
+ Node* node) {
+ JSLoadNamedFromSuperNode n(node);
+ NamedAccess const& p = n.Parameters();
+ NameRef name(broker(), p.name());
+
+ if (!p.feedback().IsValid()) return NoChange();
+ return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(),
+ FeedbackSource(p.feedback()), AccessMode::kLoad);
+}
+
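For a super property load the lookup start object is not the receiver: ReduceNamedAccess derives it from the node's home object, matching the language semantics of a super.x read (the lookup starts at the home object's __proto__ while "this" stays the original receiver). Condensed from the ReduceNamedAccess hunk above, for reference:

JSLoadNamedFromSuperNode n(node);
// Lookup start object is the __proto__ of the home object.
Node* lookup_start_object = effect =
    BuildLoadPrototypeFromObject(n.home_object(), effect, control);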
Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
JSGetIteratorNode n(node);
GetIteratorParameters const& p = n.Parameters();
@@ -1545,7 +1640,7 @@ namespace {
base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
Node* receiver) {
HeapObjectMatcher m(receiver);
- if (!m.HasValue()) return base::nullopt;
+ if (!m.HasResolvedValue()) return base::nullopt;
ObjectRef object = m.Ref(broker);
if (!object.IsJSTypedArray()) return base::nullopt;
JSTypedArrayRef typed_array = object.AsJSTypedArray();
@@ -1554,20 +1649,20 @@ base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
}
} // namespace
-void JSNativeContextSpecialization::RemoveImpossibleReceiverMaps(
- Node* receiver, ZoneVector<Handle<Map>>* receiver_maps) const {
- base::Optional<MapRef> root_map = InferReceiverRootMap(receiver);
+void JSNativeContextSpecialization::RemoveImpossibleMaps(
+ Node* object, ZoneVector<Handle<Map>>* maps) const {
+ base::Optional<MapRef> root_map = InferRootMap(object);
if (root_map.has_value()) {
DCHECK(!root_map->is_abandoned_prototype_map());
- receiver_maps->erase(
- std::remove_if(receiver_maps->begin(), receiver_maps->end(),
+ maps->erase(
+ std::remove_if(maps->begin(), maps->end(),
[root_map, this](Handle<Map> map) {
MapRef map_ref(broker(), map);
return map_ref.is_abandoned_prototype_map() ||
(map_ref.FindRootMap().has_value() &&
!map_ref.FindRootMap()->equals(*root_map));
}),
- receiver_maps->end());
+ maps->end());
}
}
@@ -1581,9 +1676,9 @@ JSNativeContextSpecialization::TryRefineElementAccessFeedback(
if (!use_inference) return feedback;
ZoneVector<Handle<Map>> inferred_maps(zone());
- if (!InferReceiverMaps(receiver, effect, &inferred_maps)) return feedback;
+ if (!InferMaps(receiver, effect, &inferred_maps)) return feedback;
- RemoveImpossibleReceiverMaps(receiver, &inferred_maps);
+ RemoveImpossibleMaps(receiver, &inferred_maps);
// TODO(neis): After Refine, the resulting feedback can still contain
// impossible maps when a target is kept only because more than one of its
// sources was inferred. Think of a way to completely rule out impossible
@@ -1650,7 +1745,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// the zone allocation of this vector.
ZoneVector<MapRef> prototype_maps(zone());
for (ElementAccessInfo const& access_info : access_infos) {
- for (Handle<Map> map : access_info.receiver_maps()) {
+ for (Handle<Map> map : access_info.lookup_start_object_maps()) {
MapRef receiver_map(broker(), map);
// If the {receiver_map} has a prototype and its elements backing
// store is either holey, or we have a potentially growing store,
@@ -1697,9 +1792,10 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
ElementAccessInfo access_info = access_infos.front();
// Perform possible elements kind transitions.
- MapRef transition_target(broker(), access_info.receiver_maps().front());
+ MapRef transition_target(broker(),
+ access_info.lookup_start_object_maps().front());
for (auto source : access_info.transition_sources()) {
- DCHECK_EQ(access_info.receiver_maps().size(), 1);
+ DCHECK_EQ(access_info.lookup_start_object_maps().size(), 1);
MapRef transition_source(broker(), source);
effect = graph()->NewNode(
simplified()->TransitionElementsKind(ElementsTransition(
@@ -1721,7 +1817,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Perform map check on the {receiver}.
access_builder.BuildCheckMaps(receiver, &effect, control,
- access_info.receiver_maps());
+ access_info.lookup_start_object_maps());
// Access the actual element.
ValueEffectControl continuation =
@@ -1748,10 +1844,11 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* this_control = fallthrough_control;
// Perform possible elements kind transitions.
- MapRef transition_target(broker(), access_info.receiver_maps().front());
+ MapRef transition_target(broker(),
+ access_info.lookup_start_object_maps().front());
for (auto source : access_info.transition_sources()) {
MapRef transition_source(broker(), source);
- DCHECK_EQ(access_info.receiver_maps().size(), 1);
+ DCHECK_EQ(access_info.lookup_start_object_maps().size(), 1);
this_effect = graph()->NewNode(
simplified()->TransitionElementsKind(ElementsTransition(
IsSimpleMapChangeTransition(transition_source.elements_kind(),
@@ -1764,7 +1861,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Perform map check(s) on {receiver}.
ZoneVector<Handle<Map>> const& receiver_maps =
- access_info.receiver_maps();
+ access_info.lookup_start_object_maps();
if (j == access_infos.size() - 1) {
// Last map check on the fallthrough control path, do a
// conditional eager deoptimization exit here.
@@ -1849,7 +1946,7 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
// constant-fold the load.
NumberMatcher mkey(key);
if (mkey.IsInteger() && mkey.IsInRange(0.0, kMaxUInt32 - 1.0)) {
- uint32_t index = static_cast<uint32_t>(mkey.Value());
+ uint32_t index = static_cast<uint32_t>(mkey.ResolvedValue());
base::Optional<ObjectRef> element =
receiver_ref.GetOwnConstantElement(index);
if (!element.has_value() && receiver_ref.IsJSArray()) {
@@ -1911,7 +2008,8 @@ Reduction JSNativeContextSpecialization::ReducePropertyAccess(
node->opcode() == IrOpcode::kJSHasProperty ||
node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
- node->opcode() == IrOpcode::kJSStoreNamedOwn);
+ node->opcode() == IrOpcode::kJSStoreNamedOwn ||
+ node->opcode() == IrOpcode::kJSLoadNamedFromSuper);
DCHECK_GE(node->op()->ControlOutputCount(), 1);
ProcessedFeedback const& feedback =
@@ -1932,6 +2030,7 @@ Reduction JSNativeContextSpecialization::ReducePropertyAccess(
case ProcessedFeedback::kElementAccess:
DCHECK_EQ(feedback.AsElementAccess().keyed_mode().access_mode(),
access_mode);
+ DCHECK_NE(node->opcode(), IrOpcode::kJSLoadNamedFromSuper);
return ReduceElementAccess(node, key, value, feedback.AsElementAccess());
default:
UNREACHABLE();
@@ -2008,18 +2107,17 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* name = NodeProperties::GetValueInput(node, 1);
- DCHECK_EQ(IrOpcode::kJSForInNext, name->opcode());
+ JSForInNextNode name(NodeProperties::GetValueInput(node, 1));
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (ForInModeOf(name->op()) != ForInMode::kUseEnumCacheKeysAndIndices) {
+ if (name.Parameters().mode() != ForInMode::kUseEnumCacheKeysAndIndices) {
return NoChange();
}
- Node* object = NodeProperties::GetValueInput(name, 0);
- Node* enumerator = NodeProperties::GetValueInput(name, 2);
- Node* key = NodeProperties::GetValueInput(name, 3);
+ Node* object = name.receiver();
+ Node* cache_type = name.cache_type();
+ Node* index = name.index();
if (object->opcode() == IrOpcode::kJSToObject) {
object = NodeProperties::GetValueInput(object, 0);
}
@@ -2033,7 +2131,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
receiver, effect, control);
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
- enumerator);
+ cache_type);
effect =
graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongMap),
check, effect, control);
@@ -2041,7 +2139,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
// Load the enum cache indices from the {cache_type}.
Node* descriptor_array = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapDescriptors()), enumerator,
+ simplified()->LoadField(AccessBuilder::ForMapDescriptors()), cache_type,
effect, control);
Node* enum_cache = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForDescriptorArrayEnumCache()),
@@ -2060,10 +2158,10 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
control);
// Determine the key from the {enum_indices}.
- key = effect = graph()->NewNode(
+ Node* key = effect = graph()->NewNode(
simplified()->LoadElement(
AccessBuilder::ForFixedArrayElement(PACKED_SMI_ELEMENTS)),
- enum_indices, key, effect, control);
+ enum_indices, index, effect, control);
// Load the actual field value.
Node* value = effect = graph()->NewNode(simplified()->LoadFieldByIndex(),
@@ -2229,14 +2327,14 @@ Node* JSNativeContextSpecialization::InlineApiCall(
JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildPropertyLoad(
- Node* receiver, Node* context, Node* frame_state, Node* effect,
- Node* control, NameRef const& name, ZoneVector<Node*>* if_exceptions,
- PropertyAccessInfo const& access_info) {
+ Node* lookup_start_object, Node* receiver, Node* context, Node* frame_state,
+ Node* effect, Node* control, NameRef const& name,
+ ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
dependencies()->DependOnStablePrototypeChains(
- access_info.receiver_maps(), kStartAtPrototype,
+ access_info.lookup_start_object_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
}
@@ -2254,12 +2352,13 @@ JSNativeContextSpecialization::BuildPropertyLoad(
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()),
cell, effect, control);
} else if (access_info.IsStringLength()) {
+ DCHECK_EQ(receiver, lookup_start_object);
value = graph()->NewNode(simplified()->StringLength(), receiver);
} else {
DCHECK(access_info.IsDataField() || access_info.IsDataConstant());
PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
- value = access_builder.BuildLoadDataField(name, access_info, receiver,
- &effect, &control);
+ value = access_builder.BuildLoadDataField(
+ name, access_info, lookup_start_object, &effect, &control);
}
return ValueEffectControl(value, effect, control);
@@ -2272,7 +2371,7 @@ JSNativeContextSpecialization::BuildPropertyTest(
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
dependencies()->DependOnStablePrototypeChains(
- access_info.receiver_maps(), kStartAtPrototype,
+ access_info.lookup_start_object_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
}
@@ -2283,19 +2382,23 @@ JSNativeContextSpecialization::BuildPropertyTest(
JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildPropertyAccess(
- Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
- Node* control, NameRef const& name, ZoneVector<Node*>* if_exceptions,
- PropertyAccessInfo const& access_info, AccessMode access_mode) {
+ Node* lookup_start_object, Node* receiver, Node* value, Node* context,
+ Node* frame_state, Node* effect, Node* control, NameRef const& name,
+ ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info,
+ AccessMode access_mode) {
switch (access_mode) {
case AccessMode::kLoad:
- return BuildPropertyLoad(receiver, context, frame_state, effect, control,
- name, if_exceptions, access_info);
+ return BuildPropertyLoad(lookup_start_object, receiver, context,
+ frame_state, effect, control, name,
+ if_exceptions, access_info);
case AccessMode::kStore:
case AccessMode::kStoreInLiteral:
+ DCHECK_EQ(receiver, lookup_start_object);
return BuildPropertyStore(receiver, value, context, frame_state, effect,
control, name, if_exceptions, access_info,
access_mode);
case AccessMode::kHas:
+ DCHECK_EQ(receiver, lookup_start_object);
return BuildPropertyTest(effect, control, access_info);
}
UNREACHABLE();
@@ -2312,7 +2415,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
if (access_info.holder().ToHandle(&holder)) {
DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
dependencies()->DependOnStablePrototypeChains(
- access_info.receiver_maps(), kStartAtPrototype,
+ access_info.lookup_start_object_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
}
@@ -2517,8 +2620,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
if (!p.feedback().IsValid()) return NoChange();
NumberMatcher mflags(n.flags());
- CHECK(mflags.HasValue());
- DataPropertyInLiteralFlags cflags(mflags.Value());
+ CHECK(mflags.HasResolvedValue());
+ DataPropertyInLiteralFlags cflags(mflags.ResolvedValue());
DCHECK(!(cflags & DataPropertyInLiteralFlag::kDontEnum));
if (cflags & DataPropertyInLiteralFlag::kSetFunctionName) return NoChange();
@@ -2575,7 +2678,8 @@ JSNativeContextSpecialization::BuildElementAccess(
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
- ZoneVector<Handle<Map>> const& receiver_maps = access_info.receiver_maps();
+ ZoneVector<Handle<Map>> const& receiver_maps =
+ access_info.lookup_start_object_maps();
if (IsTypedArrayElementsKind(elements_kind)) {
Node* buffer_or_receiver = receiver;
@@ -3334,42 +3438,40 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
return dependencies()->DependOnNoElementsProtector();
}
-bool JSNativeContextSpecialization::InferReceiverMaps(
- Node* receiver, Node* effect,
- ZoneVector<Handle<Map>>* receiver_maps) const {
- ZoneHandleSet<Map> maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect,
- &maps);
- if (result == NodeProperties::kReliableReceiverMaps) {
- for (size_t i = 0; i < maps.size(); ++i) {
- receiver_maps->push_back(maps[i]);
+bool JSNativeContextSpecialization::InferMaps(
+ Node* object, Node* effect, ZoneVector<Handle<Map>>* maps) const {
+ ZoneHandleSet<Map> map_set;
+ NodeProperties::InferMapsResult result =
+ NodeProperties::InferMapsUnsafe(broker(), object, effect, &map_set);
+ if (result == NodeProperties::kReliableMaps) {
+ for (size_t i = 0; i < map_set.size(); ++i) {
+ maps->push_back(map_set[i]);
}
return true;
- } else if (result == NodeProperties::kUnreliableReceiverMaps) {
- // For untrusted receiver maps, we can still use the information
+ } else if (result == NodeProperties::kUnreliableMaps) {
+ // For untrusted maps, we can still use the information
// if the maps are stable.
- for (size_t i = 0; i < maps.size(); ++i) {
- MapRef map(broker(), maps[i]);
+ for (size_t i = 0; i < map_set.size(); ++i) {
+ MapRef map(broker(), map_set[i]);
if (!map.is_stable()) return false;
}
- for (size_t i = 0; i < maps.size(); ++i) {
- receiver_maps->push_back(maps[i]);
+ for (size_t i = 0; i < map_set.size(); ++i) {
+ maps->push_back(map_set[i]);
}
return true;
}
return false;
}
-base::Optional<MapRef> JSNativeContextSpecialization::InferReceiverRootMap(
- Node* receiver) const {
- HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
+base::Optional<MapRef> JSNativeContextSpecialization::InferRootMap(
+ Node* object) const {
+ HeapObjectMatcher m(object);
+ if (m.HasResolvedValue()) {
MapRef map = m.Ref(broker()).map();
return map.FindRootMap();
} else if (m.IsJSCreate()) {
base::Optional<MapRef> initial_map =
- NodeProperties::GetJSCreateMap(broker(), receiver);
+ NodeProperties::GetJSCreateMap(broker(), object);
if (initial_map.has_value()) {
if (!initial_map->FindRootMap().has_value()) {
return base::nullopt;
@@ -3381,6 +3483,16 @@ base::Optional<MapRef> JSNativeContextSpecialization::InferReceiverRootMap(
return base::nullopt;
}
+Node* JSNativeContextSpecialization::BuildLoadPrototypeFromObject(
+ Node* object, Node* effect, Node* control) {
+ Node* map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), object,
+ effect, control);
+ return graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapPrototype()), map, effect,
+ control);
+}
+
Graph* JSNativeContextSpecialization::graph() const {
return jsgraph()->graph();
}
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 81587870da..3d0c347261 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -16,7 +16,6 @@ namespace internal {
// Forward declarations.
class Factory;
-class FeedbackNexus;
class JSGlobalObject;
class JSGlobalProxy;
class StringConstantBase;
@@ -54,6 +53,9 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
JSHeapBroker* broker, Flags flags,
CompilationDependencies* dependencies,
Zone* zone, Zone* shared_zone);
+ JSNativeContextSpecialization(const JSNativeContextSpecialization&) = delete;
+ JSNativeContextSpecialization& operator=(
+ const JSNativeContextSpecialization&) = delete;
const char* reducer_name() const override {
return "JSNativeContextSpecialization";
@@ -81,6 +83,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceJSLoadGlobal(Node* node);
Reduction ReduceJSStoreGlobal(Node* node);
Reduction ReduceJSLoadNamed(Node* node);
+ Reduction ReduceJSLoadNamedFromSuper(Node* node);
Reduction ReduceJSGetIterator(Node* node);
Reduction ReduceJSStoreNamed(Node* node);
Reduction ReduceJSHasProperty(Node* node);
@@ -92,7 +95,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceJSToObject(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
- ElementAccessFeedback const& processed);
+ ElementAccessFeedback const& feedback);
// In the case of non-keyed (named) accesses, pass the name as {static_name}
// and use {nullptr} for {key} (load/store modes are irrelevant).
Reduction ReducePropertyAccess(Node* node, Node* key,
@@ -100,18 +103,21 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Node* value, FeedbackSource const& source,
AccessMode access_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
- NamedAccessFeedback const& processed,
+ NamedAccessFeedback const& feedback,
AccessMode access_mode, Node* key = nullptr);
Reduction ReduceMinimorphicPropertyAccess(
Node* node, Node* value,
MinimorphicLoadPropertyAccessFeedback const& feedback,
FeedbackSource const& source);
- Reduction ReduceGlobalAccess(Node* node, Node* receiver, Node* value,
- NameRef const& name, AccessMode access_mode,
- Node* key = nullptr);
- Reduction ReduceGlobalAccess(Node* node, Node* receiver, Node* value,
- NameRef const& name, AccessMode access_mode,
- Node* key, PropertyCellRef const& property_cell);
+ Reduction ReduceGlobalAccess(Node* node, Node* lookup_start_object,
+ Node* receiver, Node* value, NameRef const& name,
+ AccessMode access_mode, Node* key = nullptr,
+ Node* effect = nullptr);
+ Reduction ReduceGlobalAccess(Node* node, Node* lookup_start_object,
+ Node* receiver, Node* value, NameRef const& name,
+ AccessMode access_mode, Node* key,
+ PropertyCellRef const& property_cell,
+ Node* effect = nullptr);
Reduction ReduceElementLoadFromHeapConstant(Node* node, Node* key,
AccessMode access_mode,
KeyedAccessLoadMode load_mode);
@@ -144,14 +150,13 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
};
// Construct the appropriate subgraph for property access.
- ValueEffectControl BuildPropertyAccess(Node* receiver, Node* value,
- Node* context, Node* frame_state,
- Node* effect, Node* control,
- NameRef const& name,
- ZoneVector<Node*>* if_exceptions,
- PropertyAccessInfo const& access_info,
- AccessMode access_mode);
- ValueEffectControl BuildPropertyLoad(Node* receiver, Node* context,
+ ValueEffectControl BuildPropertyAccess(
+ Node* lookup_start_object, Node* receiver, Node* value, Node* context,
+ Node* frame_state, Node* effect, Node* control, NameRef const& name,
+ ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info,
+ AccessMode access_mode);
+ ValueEffectControl BuildPropertyLoad(Node* lookup_start_object,
+ Node* receiver, Node* context,
Node* frame_state, Node* effect,
Node* control, NameRef const& name,
ZoneVector<Node*>* if_exceptions,
@@ -210,20 +215,19 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
// code dependencies and might use the array protector cell.
bool CanTreatHoleAsUndefined(ZoneVector<Handle<Map>> const& receiver_maps);
- void RemoveImpossibleReceiverMaps(
- Node* receiver, ZoneVector<Handle<Map>>* receiver_maps) const;
+ void RemoveImpossibleMaps(Node* object, ZoneVector<Handle<Map>>* maps) const;
ElementAccessFeedback const& TryRefineElementAccessFeedback(
ElementAccessFeedback const& feedback, Node* receiver,
Node* effect) const;
- // Try to infer maps for the given {receiver} at the current {effect}.
- bool InferReceiverMaps(Node* receiver, Node* effect,
- ZoneVector<Handle<Map>>* receiver_maps) const;
+ // Try to infer maps for the given {object} at the current {effect}.
+ bool InferMaps(Node* object, Node* effect,
+ ZoneVector<Handle<Map>>* maps) const;
- // Try to infer a root map for the {receiver} independent of the current
- // program location.
- base::Optional<MapRef> InferReceiverRootMap(Node* receiver) const;
+ // Try to infer a root map for the {object} independent of the current program
+ // location.
+ base::Optional<MapRef> InferRootMap(Node* object) const;
// Checks if we know at compile time that the {receiver} either definitely
// has the {prototype} in its prototype chain, or the {receiver} definitely
@@ -236,6 +240,8 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
InferHasInPrototypeChainResult InferHasInPrototypeChain(
Node* receiver, Node* effect, HeapObjectRef const& prototype);
+ Node* BuildLoadPrototypeFromObject(Node* object, Node* effect, Node* control);
+
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
@@ -265,8 +271,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Zone* const zone_;
Zone* const shared_zone_;
TypeCache const* type_cache_;
-
- DISALLOW_COPY_AND_ASSIGN(JSNativeContextSpecialization);
};
DEFINE_OPERATORS_FOR_FLAGS(JSNativeContextSpecialization::Flags)
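
Note: the js-native-context-specialization changes above thread a separate lookup_start_object through named-property lowering (the new ReduceJSLoadNamedFromSuper, and the widened BuildPropertyLoad/BuildPropertyAccess signatures), so that for super.property loads the prototype-chain walk can start at an object other than the receiver that is later passed to accessors. The following hypothetical, self-contained toy only illustrates that distinction; it is not V8 code and all names in it are made up.

// Hypothetical toy model (not V8 code): a property load where the object the
// lookup starts on differs from the receiver handed to an accessor, which is
// the distinction the lookup_start_object parameter carries for super loads.
#include <functional>
#include <iostream>
#include <map>
#include <optional>
#include <string>

struct Obj {
  std::string name;
  std::map<std::string, std::function<std::string(const Obj&)>> getters;
  const Obj* prototype = nullptr;
};

// Walk the chain from lookup_start_object, but invoke the getter with the
// original receiver, mirroring BuildPropertyLoad's split arguments.
std::optional<std::string> LoadProperty(const Obj& lookup_start_object,
                                        const Obj& receiver,
                                        const std::string& name) {
  for (const Obj* o = &lookup_start_object; o != nullptr; o = o->prototype) {
    auto it = o->getters.find(name);
    if (it != o->getters.end()) return it->second(receiver);
  }
  return std::nullopt;
}

int main() {
  Obj base{"base", {}, nullptr};
  Obj derived{"derived", {}, &base};
  base.getters["whoami"] = [](const Obj& receiver) { return receiver.name; };
  // A super.whoami load: the lookup starts at base, the receiver stays derived.
  std::cout << LoadProperty(base, derived, "whoami").value_or("<missing>")
            << "\n";
}

Run as a sketch, this prints "derived": the getter is found starting from the lookup-start object but still observes the original receiver.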
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index dccc9558b5..da3af62bf2 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -40,8 +40,8 @@ TNode<Oddball> UndefinedConstant(JSGraph* jsgraph) {
FeedbackCellRef JSCreateClosureNode::GetFeedbackCellRefChecked(
JSHeapBroker* broker) const {
HeapObjectMatcher m(feedback_cell());
- CHECK(m.HasValue());
- return FeedbackCellRef(broker, m.Value());
+ CHECK(m.HasResolvedValue());
+ return FeedbackCellRef(broker, m.ResolvedValue());
}
std::ostream& operator<<(std::ostream& os, CallFrequency const& f) {
@@ -640,9 +640,9 @@ size_t hash_value(GetIteratorParameters const& p) {
FeedbackSource::Hash()(p.callFeedback()));
}
-size_t hash_value(ForInMode mode) { return static_cast<uint8_t>(mode); }
+size_t hash_value(ForInMode const& mode) { return static_cast<uint8_t>(mode); }
-std::ostream& operator<<(std::ostream& os, ForInMode mode) {
+std::ostream& operator<<(std::ostream& os, ForInMode const& mode) {
switch (mode) {
case ForInMode::kUseEnumCacheKeysAndIndices:
return os << "UseEnumCacheKeysAndIndices";
@@ -654,10 +654,26 @@ std::ostream& operator<<(std::ostream& os, ForInMode mode) {
UNREACHABLE();
}
-ForInMode ForInModeOf(Operator const* op) {
+bool operator==(ForInParameters const& lhs, ForInParameters const& rhs) {
+ return lhs.feedback() == rhs.feedback() && lhs.mode() == rhs.mode();
+}
+
+bool operator!=(ForInParameters const& lhs, ForInParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(ForInParameters const& p) {
+ return base::hash_combine(FeedbackSource::Hash()(p.feedback()), p.mode());
+}
+
+std::ostream& operator<<(std::ostream& os, ForInParameters const& p) {
+ return os << p.feedback() << ", " << p.mode();
+}
+
+ForInParameters const& ForInParametersOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kJSForInNext ||
op->opcode() == IrOpcode::kJSForInPrepare);
- return OpParameter<ForInMode>(op);
+ return OpParameter<ForInParameters>(op);
}
#define CACHED_OP_LIST(V) \
@@ -693,7 +709,7 @@ ForInMode ForInModeOf(Operator const* op) {
V(PromiseResolve, Operator::kNoProperties, 2, 1) \
V(RejectPromise, Operator::kNoDeopt | Operator::kNoThrow, 3, 1) \
V(ResolvePromise, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \
- V(GetSuperConstructor, Operator::kNoWrite, 1, 1) \
+ V(GetSuperConstructor, Operator::kNoWrite | Operator::kNoThrow, 1, 1) \
V(ParseInt, Operator::kNoProperties, 2, 1) \
V(RegExpTest, Operator::kNoProperties, 2, 1)
@@ -919,12 +935,13 @@ const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
access); // parameter
}
-const Operator* JSOperatorBuilder::LoadNamedFromSuper(Handle<Name> name) {
+const Operator* JSOperatorBuilder::LoadNamedFromSuper(
+ Handle<Name> name, const FeedbackSource& feedback) {
static constexpr int kReceiver = 1;
static constexpr int kHomeObject = 1;
- static constexpr int kArity = kReceiver + kHomeObject;
- // TODO(marja, v8:9237): Use real feedback.
- NamedAccess access(LanguageMode::kSloppy, name, FeedbackSource());
+ static constexpr int kFeedbackVector = 1;
+ static constexpr int kArity = kReceiver + kHomeObject + kFeedbackVector;
+ NamedAccess access(LanguageMode::kSloppy, name, feedback);
return zone()->New<Operator1<NamedAccess>>( // --
IrOpcode::kJSLoadNamedFromSuper, Operator::kNoProperties, // opcode
"JSLoadNamedFromSuper", // name
@@ -961,21 +978,23 @@ const Operator* JSOperatorBuilder::HasProperty(FeedbackSource const& feedback) {
access); // parameter
}
-const Operator* JSOperatorBuilder::ForInNext(ForInMode mode) {
- return zone()->New<Operator1<ForInMode>>( // --
+const Operator* JSOperatorBuilder::ForInNext(ForInMode mode,
+ const FeedbackSource& feedback) {
+ return zone()->New<Operator1<ForInParameters>>( // --
IrOpcode::kJSForInNext, Operator::kNoProperties, // opcode
"JSForInNext", // name
- 4, 1, 1, 1, 1, 2, // counts
- mode); // parameter
-}
-
-const Operator* JSOperatorBuilder::ForInPrepare(ForInMode mode) {
- return zone()->New<Operator1<ForInMode>>( // --
- IrOpcode::kJSForInPrepare, // opcode
- Operator::kNoWrite | Operator::kNoThrow, // flags
- "JSForInPrepare", // name
- 1, 1, 1, 3, 1, 1, // counts
- mode); // parameter
+ 5, 1, 1, 1, 1, 2, // counts
+ ForInParameters{feedback, mode}); // parameter
+}
+
+const Operator* JSOperatorBuilder::ForInPrepare(
+ ForInMode mode, const FeedbackSource& feedback) {
+ return zone()->New<Operator1<ForInParameters>>( // --
+ IrOpcode::kJSForInPrepare, // opcode
+ Operator::kNoWrite | Operator::kNoThrow, // flags
+ "JSForInPrepare", // name
+ 2, 1, 1, 3, 1, 1, // counts
+ ForInParameters{feedback, mode}); // parameter
}
const Operator* JSOperatorBuilder::GeneratorStore(int register_count) {
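
Note: ForInNext and ForInPrepare are now parameterized by ForInParameters (mode plus feedback source) instead of a bare ForInMode, which is why the patch adds operator==, hash_value and operator<< for the new struct: a parameterized operator is deduplicated, hashed and printed through its parameter. The sketch below is a rough, self-contained illustration of that requirement, not V8 code; an int stands in for FeedbackSource and the Op1 template is an assumed simplification.

// Toy sketch (not V8 code): a struct used as an operator parameter must be
// comparable, hashable and printable, because the operator delegates all
// three to it.
#include <cstdint>
#include <functional>
#include <iostream>

enum class ForInMode : uint8_t {
  kUseEnumCacheKeysAndIndices,
  kUseEnumCacheKeys,
  kGeneric
};

struct ForInParams {
  int feedback_slot;  // stand-in for FeedbackSource
  ForInMode mode;
};

bool operator==(const ForInParams& a, const ForInParams& b) {
  return a.feedback_slot == b.feedback_slot && a.mode == b.mode;
}
size_t hash_value(const ForInParams& p) {
  return std::hash<int>()(p.feedback_slot) * 31 + static_cast<size_t>(p.mode);
}
std::ostream& operator<<(std::ostream& os, const ForInParams& p) {
  return os << "slot " << p.feedback_slot << ", mode "
            << static_cast<int>(p.mode);
}

// A minimal parameterized operator: equality and hashing delegate to the
// parameter, which is the role ForInParameters plays for Operator1.
template <typename T>
struct Op1 {
  const char* mnemonic;
  T parameter;
  bool Equals(const Op1& other) const { return parameter == other.parameter; }
  size_t HashCode() const { return hash_value(parameter); }
};

int main() {
  Op1<ForInParams> a{"JSForInNext", {0, ForInMode::kGeneric}};
  Op1<ForInParams> b{"JSForInNext", {0, ForInMode::kGeneric}};
  std::cout << a.mnemonic << "[" << a.parameter << "] equal: " << a.Equals(b)
            << ", hash: " << a.HashCode() << "\n";
}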
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 4043969000..7e61bf3760 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -789,18 +789,32 @@ std::ostream& operator<<(std::ostream&, GetIteratorParameters const&);
const GetIteratorParameters& GetIteratorParametersOf(const Operator* op);
-// Descriptor used by the JSForInPrepare and JSForInNext opcodes.
enum class ForInMode : uint8_t {
kUseEnumCacheKeysAndIndices,
kUseEnumCacheKeys,
kGeneric
};
+size_t hash_value(ForInMode const&);
+std::ostream& operator<<(std::ostream&, ForInMode const&);
-size_t hash_value(ForInMode);
+class ForInParameters final {
+ public:
+ ForInParameters(const FeedbackSource& feedback, ForInMode mode)
+ : feedback_(feedback), mode_(mode) {}
+
+ const FeedbackSource& feedback() const { return feedback_; }
+ ForInMode mode() const { return mode_; }
-std::ostream& operator<<(std::ostream&, ForInMode);
+ private:
+ const FeedbackSource feedback_;
+ const ForInMode mode_;
+};
-ForInMode ForInModeOf(Operator const* op) V8_WARN_UNUSED_RESULT;
+bool operator==(ForInParameters const&, ForInParameters const&);
+bool operator!=(ForInParameters const&, ForInParameters const&);
+size_t hash_value(ForInParameters const&);
+std::ostream& operator<<(std::ostream&, ForInParameters const&);
+const ForInParameters& ForInParametersOf(const Operator* op);
int RegisterCountOf(Operator const* op) V8_WARN_UNUSED_RESULT;
@@ -816,6 +830,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
: public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit JSOperatorBuilder(Zone* zone);
+ JSOperatorBuilder(const JSOperatorBuilder&) = delete;
+ JSOperatorBuilder& operator=(const JSOperatorBuilder&) = delete;
const Operator* Equal(FeedbackSource const& feedback);
const Operator* StrictEqual(FeedbackSource const& feedback);
@@ -921,7 +937,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* LoadProperty(FeedbackSource const& feedback);
const Operator* LoadNamed(Handle<Name> name, FeedbackSource const& feedback);
- const Operator* LoadNamedFromSuper(Handle<Name> name);
+ const Operator* LoadNamedFromSuper(Handle<Name> name,
+ FeedbackSource const& feedback);
const Operator* StoreProperty(LanguageMode language_mode,
FeedbackSource const& feedback);
@@ -966,8 +983,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* AsyncFunctionResolve();
const Operator* ForInEnumerate();
- const Operator* ForInNext(ForInMode);
- const Operator* ForInPrepare(ForInMode);
+ const Operator* ForInNext(ForInMode mode, const FeedbackSource& feedback);
+ const Operator* ForInPrepare(ForInMode mode, const FeedbackSource& feedback);
const Operator* LoadMessage();
const Operator* StoreMessage();
@@ -1010,8 +1027,6 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const JSOperatorGlobalCache& cache_;
Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(JSOperatorBuilder);
};
// Node wrappers.
@@ -1399,9 +1414,13 @@ class JSLoadNamedFromSuperNode final : public JSNodeWrapperBase {
const NamedAccess& Parameters() const { return NamedAccessOf(node()->op()); }
-#define INPUTS(V) \
- V(Receiver, receiver, 0, Object) \
- V(Object, home_object, 1, Object)
+ // TODO(marja, v8:9237): A more intuitive order would be (home_object,
+ // receiver, feedback_vector). The order can be changed once we no longer
+ // delegate to Runtime_LoadFromSuper.
+#define INPUTS(V) \
+ V(Receiver, receiver, 0, Object) \
+ V(HomeObject, home_object, 1, Object) \
+ V(FeedbackVector, feedback_vector, 2, HeapObject)
INPUTS(DEFINE_INPUT_ACCESSORS)
#undef INPUTS
};
@@ -1546,6 +1565,43 @@ class JSCreateClosureNode final : public JSNodeWrapperBase {
FeedbackCellRef GetFeedbackCellRefChecked(JSHeapBroker* broker) const;
};
+class JSForInPrepareNode final : public JSNodeWrapperBase {
+ public:
+ explicit constexpr JSForInPrepareNode(Node* node) : JSNodeWrapperBase(node) {
+ CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSForInPrepare);
+ }
+
+ const ForInParameters& Parameters() const {
+ return ForInParametersOf(node()->op());
+ }
+
+#define INPUTS(V) \
+ V(Enumerator, enumerator, 0, Object) \
+ V(FeedbackVector, feedback_vector, 1, HeapObject)
+ INPUTS(DEFINE_INPUT_ACCESSORS)
+#undef INPUTS
+};
+
+class JSForInNextNode final : public JSNodeWrapperBase {
+ public:
+ explicit constexpr JSForInNextNode(Node* node) : JSNodeWrapperBase(node) {
+ CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSForInNext);
+ }
+
+ const ForInParameters& Parameters() const {
+ return ForInParametersOf(node()->op());
+ }
+
+#define INPUTS(V) \
+ V(Receiver, receiver, 0, Object) \
+ V(CacheArray, cache_array, 1, Object) \
+ V(CacheType, cache_type, 2, Object) \
+ V(Index, index, 3, Smi) \
+ V(FeedbackVector, feedback_vector, 4, HeapObject)
+ INPUTS(DEFINE_INPUT_ACCESSORS)
+#undef INPUTS
+};
+
#undef DEFINE_INPUT_ACCESSORS
} // namespace compiler
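
Note: JSForInPrepareNode and JSForInNextNode follow the existing node-wrapper pattern: assert the opcode once, then expose named accessors for the fixed input layout (now including the feedback_vector input). A minimal stand-alone sketch of the pattern, not V8 code and with made-up types:

// Toy sketch (not V8 code): a thin view over a node that checks the opcode
// and replaces raw input indices with named accessors.
#include <cassert>
#include <iostream>
#include <vector>

enum class Opcode { kJSForInNext, kOther };

struct Node {
  Opcode opcode;
  std::vector<int> value_inputs;  // stand-ins for the real input nodes
};

class ForInNextView {
 public:
  explicit ForInNextView(const Node* node) : node_(node) {
    assert(node->opcode == Opcode::kJSForInNext);
  }
  int receiver() const { return node_->value_inputs[0]; }
  int cache_array() const { return node_->value_inputs[1]; }
  int cache_type() const { return node_->value_inputs[2]; }
  int index() const { return node_->value_inputs[3]; }
  int feedback_vector() const { return node_->value_inputs[4]; }

 private:
  const Node* node_;
};

int main() {
  Node n{Opcode::kJSForInNext, {10, 11, 12, 13, 14}};
  ForInNextView view(&n);
  // A reducer reads named inputs instead of positional GetValueInput calls.
  std::cout << "index input: " << view.index() << "\n";
}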
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 808c59a65e..046ed47577 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -513,9 +513,9 @@ JSTypeHintLowering::ReduceGetIteratorOperation(const Operator* op,
}
JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadNamedOperation(
- const Operator* op, Node* receiver, Node* effect, Node* control,
- FeedbackSlot slot) const {
- DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode());
+ const Operator* op, Node* effect, Node* control, FeedbackSlot slot) const {
+ DCHECK(op->opcode() == IrOpcode::kJSLoadNamed ||
+ op->opcode() == IrOpcode::kJSLoadNamedFromSuper);
if (Node* node = TryBuildSoftDeopt(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
@@ -574,8 +574,8 @@ Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackSlot slot, Node* effect,
FeedbackSource source(feedback_vector(), slot);
// TODO(mythria): Think of adding flags to specify if we need a soft deopt for
- // calls instead of using FLAG_turboprop here.
- if (FLAG_turboprop &&
+ // calls instead of using broker()->is_turboprop() here.
+ if (broker()->is_turboprop() &&
broker()->GetFeedbackSlotKind(source) == FeedbackSlotKind::kCall) {
return nullptr;
}
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index 256858c1c6..c89acd12ff 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -14,7 +14,6 @@ namespace v8 {
namespace internal {
// Forward declarations.
-class FeedbackNexus;
class FeedbackSlot;
namespace compiler {
@@ -43,6 +42,8 @@ class JSTypeHintLowering {
JSTypeHintLowering(JSHeapBroker* broker, JSGraph* jsgraph,
FeedbackVectorRef feedback_vector, Flags flags);
+ JSTypeHintLowering(const JSTypeHintLowering&) = delete;
+ JSTypeHintLowering& operator=(const JSTypeHintLowering&) = delete;
// {LoweringResult} describes the result of lowering. The following outcomes
// are possible:
@@ -143,8 +144,8 @@ class JSTypeHintLowering {
FeedbackSlot call_slot) const;
// Potential reduction of property access operations.
- LoweringResult ReduceLoadNamedOperation(const Operator* op, Node* obj,
- Node* effect, Node* control,
+ LoweringResult ReduceLoadNamedOperation(const Operator* op, Node* effect,
+ Node* control,
FeedbackSlot slot) const;
LoweringResult ReduceLoadKeyedOperation(const Operator* op, Node* obj,
Node* key, Node* effect,
@@ -177,8 +178,6 @@ class JSTypeHintLowering {
JSGraph* const jsgraph_;
Flags const flags_;
FeedbackVectorRef const feedback_vector_;
-
- DISALLOW_COPY_AND_ASSIGN(JSTypeHintLowering);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 46018225a3..9927cc0b70 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -107,11 +107,11 @@ class JSBinopReduction final {
GetBinaryOperationHint(node_) == BinaryOperationHint::kString) {
HeapObjectBinopMatcher m(node_);
JSHeapBroker* broker = lowering_->broker();
- if (m.right().HasValue() && m.right().Ref(broker).IsString()) {
+ if (m.right().HasResolvedValue() && m.right().Ref(broker).IsString()) {
StringRef right_string = m.right().Ref(broker).AsString();
if (right_string.length() >= ConsString::kMinLength) return true;
}
- if (m.left().HasValue() && m.left().Ref(broker).IsString()) {
+ if (m.left().HasResolvedValue() && m.left().Ref(broker).IsString()) {
StringRef left_string = m.left().Ref(broker).AsString();
if (left_string.length() >= ConsString::kMinLength) {
// The invariant for ConsString requires the left hand side to be
@@ -989,7 +989,7 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
if (input_type.Is(Type::String())) {
HeapObjectMatcher m(input);
- if (m.HasValue() && m.Ref(broker()).IsString()) {
+ if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
double number;
ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
@@ -1492,8 +1492,6 @@ namespace {
void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
CallDescriptor::Flags flags) {
// Patch {node} to a direct CEntry call.
- //
- // When V8_REVERSE_JSARGS is set:
// ----------- A r g u m e n t s -----------
// -- 0: CEntry
// --- Stack args ---
@@ -1507,21 +1505,6 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
// -- 6 + n: the C entry point
// -- 6 + n + 1: argc (Int32)
// -----------------------------------
- //
- // Otherwise:
- // ----------- A r g u m e n t s -----------
- // -- 0: CEntry
- // --- Stack args ---
- // -- 1: receiver
- // -- [2, 2 + n[: the n actual arguments passed to the builtin
- // -- 2 + n: padding
- // -- 2 + n + 1: argc, including the receiver and implicit args (Smi)
- // -- 2 + n + 2: target
- // -- 2 + n + 3: new_target
- // --- Register args ---
- // -- 2 + n + 4: the C entry point
- // -- 2 + n + 5: argc (Int32)
- // -----------------------------------
// The logic contained here is mirrored in Builtins::Generate_Adaptor.
// Keep these in sync.
@@ -1558,19 +1541,11 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
Node* argc_node = jsgraph->Constant(argc);
static const int kStubAndReceiver = 2;
-#ifdef V8_REVERSE_JSARGS
node->InsertInput(zone, 1, new_target);
node->InsertInput(zone, 2, target);
node->InsertInput(zone, 3, argc_node);
node->InsertInput(zone, 4, jsgraph->PaddingConstant());
int cursor = arity + kStubAndReceiver + BuiltinArguments::kNumExtraArgs;
-#else
- int cursor = arity + kStubAndReceiver;
- node->InsertInput(zone, cursor++, jsgraph->PaddingConstant());
- node->InsertInput(zone, cursor++, argc_node);
- node->InsertInput(zone, cursor++, target);
- node->InsertInput(zone, cursor++, new_target);
-#endif
Address entry = Builtins::CppEntryOf(builtin_index);
ExternalReference entry_ref = ExternalReference::Create(entry);
@@ -1803,51 +1778,18 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
#else
if (NeedsArgumentAdaptorFrame(*shared, arity)) {
node->RemoveInput(n.FeedbackVectorIndex());
-
- // Check if it's safe to skip the arguments adaptor for {shared},
- // that is whether the target function anyways cannot observe the
- // actual arguments. Details can be found in this document at
- // https://bit.ly/v8-faster-calls-with-arguments-mismatch and
- // on the tracking bug at https://crbug.com/v8/8895
- if (shared->is_safe_to_skip_arguments_adaptor()) {
- // Currently we only support skipping arguments adaptor frames
- // for strict mode functions, since there's Function.arguments
- // legacy accessor, which is still available in sloppy mode.
- DCHECK_EQ(LanguageMode::kStrict, shared->language_mode());
-
- // Massage the arguments to match the expected number of arguments.
- int expected_argument_count = shared->internal_formal_parameter_count();
- for (; arity > expected_argument_count; --arity) {
- node->RemoveInput(arity + 1);
- }
- for (; arity < expected_argument_count; ++arity) {
- node->InsertInput(graph()->zone(), arity + 2,
- jsgraph()->UndefinedConstant());
- }
-
- // Patch {node} to a direct call.
- node->InsertInput(graph()->zone(), arity + 2, new_target);
- node->InsertInput(graph()->zone(), arity + 3,
- jsgraph()->Constant(arity));
- NodeProperties::ChangeOp(node,
- common()->Call(Linkage::GetJSCallDescriptor(
- graph()->zone(), false, 1 + arity,
- flags | CallDescriptor::kCanUseRoots)));
- } else {
- // Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
- Callable callable = CodeFactory::ArgumentAdaptor(isolate());
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, new_target);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
- node->InsertInput(
- graph()->zone(), 4,
- jsgraph()->Constant(shared->internal_formal_parameter_count()));
- NodeProperties::ChangeOp(
- node,
- common()->Call(Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 1 + arity, flags)));
- }
+ // Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
+ Callable callable = CodeFactory::ArgumentAdaptor(isolate());
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(
+ graph()->zone(), 4,
+ jsgraph()->Constant(shared->internal_formal_parameter_count()));
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(), 1 + arity, flags)));
#endif
} else if (shared->HasBuiltinId() &&
Builtins::IsCpp(shared->builtin_id())) {
@@ -1912,23 +1854,22 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
}
Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
- DCHECK_EQ(IrOpcode::kJSForInNext, node->opcode());
- ForInMode const mode = ForInModeOf(node->op());
- Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* cache_array = NodeProperties::GetValueInput(node, 1);
- Node* cache_type = NodeProperties::GetValueInput(node, 2);
- Node* index = NodeProperties::GetValueInput(node, 3);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+ JSForInNextNode n(node);
+ Node* receiver = n.receiver();
+ Node* cache_array = n.cache_array();
+ Node* cache_type = n.cache_type();
+ Node* index = n.index();
+ Node* context = n.context();
+ FrameState frame_state = n.frame_state();
+ Effect effect = n.effect();
+ Control control = n.control();
// Load the map of the {receiver}.
Node* receiver_map = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
receiver, effect, control);
- switch (mode) {
+ switch (n.Parameters().mode()) {
case ForInMode::kUseEnumCacheKeys:
case ForInMode::kUseEnumCacheKeysAndIndices: {
// Ensure that the expected map still matches that of the {receiver}.
@@ -2025,16 +1966,15 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
}
Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
- DCHECK_EQ(IrOpcode::kJSForInPrepare, node->opcode());
- ForInMode const mode = ForInModeOf(node->op());
- Node* enumerator = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+ JSForInPrepareNode n(node);
+ Node* enumerator = n.enumerator();
+ Effect effect = n.effect();
+ Control control = n.control();
Node* cache_type = enumerator;
Node* cache_array = nullptr;
Node* cache_length = nullptr;
- switch (mode) {
+ switch (n.Parameters().mode()) {
case ForInMode::kUseEnumCacheKeys:
case ForInMode::kUseEnumCacheKeysAndIndices: {
// Check that the {enumerator} is a Map.
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index cde4b96c87..ee025896c0 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -339,11 +339,7 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
// All parameters to JS calls go on the stack.
for (int i = 0; i < js_parameter_count; i++) {
-#ifdef V8_REVERSE_JSARGS
int spill_slot_index = -i - 1;
-#else
- int spill_slot_index = i - js_parameter_count;
-#endif
locations.AddParam(LinkageLocation::ForCallerFrameSlot(
spill_slot_index, MachineType::AnyTagged()));
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index d96b049d92..ad68a57957 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -276,6 +276,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
stack_order_(stack_order),
debug_name_(debug_name) {}
+ CallDescriptor(const CallDescriptor&) = delete;
+ CallDescriptor& operator=(const CallDescriptor&) = delete;
+
// Returns the kind of this call.
Kind kind() const { return kind_; }
@@ -317,16 +320,12 @@ class V8_EXPORT_PRIVATE CallDescriptor final
}
int GetStackIndexFromSlot(int slot_index) const {
-#ifdef V8_REVERSE_JSARGS
switch (GetStackArgumentOrder()) {
case StackArgumentOrder::kDefault:
return -slot_index - 1;
case StackArgumentOrder::kJS:
return slot_index + static_cast<int>(StackParameterCount());
}
-#else
- return -slot_index - 1;
-#endif
}
// The total number of inputs to this call, which includes the target,
@@ -433,8 +432,6 @@ class V8_EXPORT_PRIVATE CallDescriptor final
const StackArgumentOrder stack_order_;
const char* const debug_name_;
const CFunctionInfo* c_function_info_ = nullptr;
-
- DISALLOW_COPY_AND_ASSIGN(CallDescriptor);
};
DEFINE_OPERATORS_FOR_FLAGS(CallDescriptor::Flags)
@@ -460,6 +457,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit Linkage(CallDescriptor* incoming) : incoming_(incoming) {}
+ Linkage(const Linkage&) = delete;
+ Linkage& operator=(const Linkage&) = delete;
static CallDescriptor* ComputeIncoming(Zone* zone,
OptimizedCompilationInfo* info);
@@ -558,8 +557,6 @@ class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
private:
CallDescriptor* const incoming_;
-
- DISALLOW_COPY_AND_ASSIGN(Linkage);
};
} // namespace compiler
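
Note: with the V8_REVERSE_JSARGS blocks removed, GetStackIndexFromSlot always distinguishes the two stack argument orders, and linkage.cc always spills JS parameters at slot -i - 1. Below is a small sketch of the resulting slot-to-index mapping, under the assumption that StackParameterCount() is the number of stack parameters; it is not V8 code.

// Minimal sketch (assumed reading of the new code, not V8 code) of the two
// mappings GetStackIndexFromSlot now switches between.
#include <iostream>

enum class StackArgumentOrder { kDefault, kJS };

int GetStackIndexFromSlot(StackArgumentOrder order, int slot_index,
                          int stack_parameter_count) {
  switch (order) {
    case StackArgumentOrder::kDefault:
      return -slot_index - 1;  // count down from -1
    case StackArgumentOrder::kJS:
      return slot_index + stack_parameter_count;  // count up past the params
  }
  return 0;  // unreachable
}

int main() {
  // With 3 stack parameters, slot 0 maps to -1 (kDefault) or 3 (kJS).
  std::cout << GetStackIndexFromSlot(StackArgumentOrder::kDefault, 0, 3) << " "
            << GetStackIndexFromSlot(StackArgumentOrder::kJS, 0, 3) << "\n";
}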
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 6b7cb86cdd..e0f4eb7fe2 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -33,6 +33,8 @@ class V8_EXPORT_PRIVATE LoadElimination final
LoadElimination(Editor* editor, JSGraph* jsgraph, Zone* zone)
: AdvancedReducer(editor), node_states_(zone), jsgraph_(jsgraph) {}
~LoadElimination() final = default;
+ LoadElimination(const LoadElimination&) = delete;
+ LoadElimination& operator=(const LoadElimination&) = delete;
const char* reducer_name() const override { return "LoadElimination"; }
@@ -335,8 +337,6 @@ class V8_EXPORT_PRIVATE LoadElimination final
AbstractStateForEffectNodes node_states_;
JSGraph* const jsgraph_;
-
- DISALLOW_COPY_AND_ASSIGN(LoadElimination);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/machine-graph.h b/deps/v8/src/compiler/machine-graph.h
index 87175847f5..a110a4b7e8 100644
--- a/deps/v8/src/compiler/machine-graph.h
+++ b/deps/v8/src/compiler/machine-graph.h
@@ -25,6 +25,8 @@ class V8_EXPORT_PRIVATE MachineGraph : public NON_EXPORTED_BASE(ZoneObject) {
MachineGraph(Graph* graph, CommonOperatorBuilder* common,
MachineOperatorBuilder* machine)
: graph_(graph), common_(common), machine_(machine), cache_(zone()) {}
+ MachineGraph(const MachineGraph&) = delete;
+ MachineGraph& operator=(const MachineGraph&) = delete;
// Creates a Int32Constant node, usually canonicalized.
Node* Int32Constant(int32_t value);
@@ -84,8 +86,6 @@ class V8_EXPORT_PRIVATE MachineGraph : public NON_EXPORTED_BASE(ZoneObject) {
MachineOperatorBuilder* machine_;
CommonNodeCache cache_;
Node* Dead_ = nullptr;
-
- DISALLOW_COPY_AND_ASSIGN(MachineGraph);
};
} // namespace compiler
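
Note: the same mechanical cleanup recurs across these headers (js-native-context-specialization.h, js-operator.h, js-type-hint-lowering.h, linkage.h, load-elimination.h, machine-graph.h): the DISALLOW_COPY_AND_ASSIGN macro is dropped and the copy operations are declared as explicitly deleted members next to the constructors. A minimal sketch of the replacement idiom:

// Minimal sketch of the new idiom: copying is disabled with deleted members
// instead of the DISALLOW_COPY_AND_ASSIGN macro.
#include <iostream>

class Reducer {
 public:
  Reducer() = default;
  Reducer(const Reducer&) = delete;             // replaces the macro: copying
  Reducer& operator=(const Reducer&) = delete;  // is still a compile error

  void Run() const { std::cout << "running\n"; }
};

int main() {
  Reducer r;
  r.Run();
  // Reducer copy = r;          // would not compile: copy constructor deleted
  // Reducer other; other = r;  // likewise: copy assignment deleted
}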
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 55f39d76e8..918caaf8fd 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -305,8 +305,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x ror 0 => x
if (m.IsFoldable()) { // K ror K => K (K stands for arbitrary constants)
- return ReplaceInt32(base::bits::RotateRight32(m.left().Value(),
- m.right().Value() & 31));
+ return ReplaceInt32(base::bits::RotateRight32(
+ m.left().ResolvedValue(), m.right().ResolvedValue() & 31));
}
break;
}
@@ -316,7 +316,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kWord64Equal: {
Int64BinopMatcher m(node);
if (m.IsFoldable()) { // K == K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() == m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() ==
+ m.right().ResolvedValue());
}
if (m.left().IsInt64Sub() && m.right().Is(0)) { // x - y == 0 => x == y
Int64BinopMatcher msub(m.left().node());
@@ -341,8 +342,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.right().Is(0)) return Replace(m.right().node()); // x * 0 => 0
if (m.right().Is(1)) return Replace(m.left().node()); // x * 1 => x
if (m.IsFoldable()) { // K * K => K (K stands for arbitrary constants)
- return ReplaceInt32(
- base::MulWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt32(base::MulWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.right().Is(-1)) { // x * -1 => 0 - x
node->ReplaceInput(0, Int32Constant(0));
@@ -351,17 +352,18 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Changed(node);
}
if (m.right().IsPowerOf2()) { // x * 2^n => x << n
- node->ReplaceInput(
- 1, Int32Constant(base::bits::WhichPowerOfTwo(m.right().Value())));
+ node->ReplaceInput(1, Int32Constant(base::bits::WhichPowerOfTwo(
+ m.right().ResolvedValue())));
NodeProperties::ChangeOp(node, machine()->Word32Shl());
return Changed(node).FollowedBy(ReduceWord32Shl(node));
}
// (x * Int32Constant(a)) * Int32Constant(b)) => x * Int32Constant(a * b)
- if (m.right().HasValue() && m.left().IsInt32Mul()) {
+ if (m.right().HasResolvedValue() && m.left().IsInt32Mul()) {
Int32BinopMatcher n(m.left().node());
- if (n.right().HasValue() && m.OwnsInput(m.left().node())) {
- node->ReplaceInput(1, Int32Constant(base::MulWithWraparound(
- m.right().Value(), n.right().Value())));
+ if (n.right().HasResolvedValue() && m.OwnsInput(m.left().node())) {
+ node->ReplaceInput(
+ 1, Int32Constant(base::MulWithWraparound(
+ m.right().ResolvedValue(), n.right().ResolvedValue())));
node->ReplaceInput(0, n.left().node());
return Changed(node);
}
@@ -396,7 +398,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kInt32LessThan: {
Int32BinopMatcher m(node);
if (m.IsFoldable()) { // K < K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() < m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <
+ m.right().ResolvedValue());
}
if (m.LeftEqualsRight()) return ReplaceBool(false); // x < x => false
if (m.left().IsWord32Or() && m.right().Is(0)) {
@@ -412,7 +415,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kInt32LessThanOrEqual: {
Int32BinopMatcher m(node);
if (m.IsFoldable()) { // K <= K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() <= m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <=
+ m.right().ResolvedValue());
}
if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true
return ReduceWord32Comparisons(node);
@@ -422,16 +426,17 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.left().Is(kMaxUInt32)) return ReplaceBool(false); // M < x => false
if (m.right().Is(0)) return ReplaceBool(false); // x < 0 => false
if (m.IsFoldable()) { // K < K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() < m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <
+ m.right().ResolvedValue());
}
if (m.LeftEqualsRight()) return ReplaceBool(false); // x < x => false
- if (m.left().IsWord32Sar() && m.right().HasValue()) {
+ if (m.left().IsWord32Sar() && m.right().HasResolvedValue()) {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasResolvedValue()) {
// (x >> K) < C => x < (C << K)
// when C < (M >> K)
- const uint32_t c = m.right().Value();
- const uint32_t k = mleft.right().Value() & 0x1F;
+ const uint32_t c = m.right().ResolvedValue();
+ const uint32_t k = mleft.right().ResolvedValue() & 0x1F;
if (c < static_cast<uint32_t>(kMaxInt >> k)) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1, Uint32Constant(c << k));
@@ -447,7 +452,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.left().Is(0)) return ReplaceBool(true); // 0 <= x => true
if (m.right().Is(kMaxUInt32)) return ReplaceBool(true); // x <= M => true
if (m.IsFoldable()) { // K <= K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() <= m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <=
+ m.right().ResolvedValue());
}
if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true
return ReduceWord32Comparisons(node);
@@ -455,19 +461,22 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat32Sub: {
Float32BinopMatcher m(node);
if (allow_signalling_nan_ && m.right().Is(0) &&
- (std::copysign(1.0, m.right().Value()) > 0)) {
+ (std::copysign(1.0, m.right().ResolvedValue()) > 0)) {
return Replace(m.left().node()); // x - 0 => x
}
if (m.right().IsNaN()) { // x - NaN => NaN
// Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat32(m.right().Value() - m.right().Value());
+ return ReplaceFloat32(m.right().ResolvedValue() -
+ m.right().ResolvedValue());
}
if (m.left().IsNaN()) { // NaN - x => NaN
// Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat32(m.left().Value() - m.left().Value());
+ return ReplaceFloat32(m.left().ResolvedValue() -
+ m.left().ResolvedValue());
}
if (m.IsFoldable()) { // L - R => (L - R)
- return ReplaceFloat32(m.left().Value() - m.right().Value());
+ return ReplaceFloat32(m.left().ResolvedValue() -
+ m.right().ResolvedValue());
}
if (allow_signalling_nan_ && m.left().IsMinusZero()) {
// -0.0 - round_down(-0.0 - R) => round_up(R)
@@ -491,26 +500,30 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64Add: {
Float64BinopMatcher m(node);
if (m.IsFoldable()) { // K + K => K (K stands for arbitrary constants)
- return ReplaceFloat64(m.left().Value() + m.right().Value());
+ return ReplaceFloat64(m.left().ResolvedValue() +
+ m.right().ResolvedValue());
}
break;
}
case IrOpcode::kFloat64Sub: {
Float64BinopMatcher m(node);
if (allow_signalling_nan_ && m.right().Is(0) &&
- (Double(m.right().Value()).Sign() > 0)) {
+ (Double(m.right().ResolvedValue()).Sign() > 0)) {
return Replace(m.left().node()); // x - 0 => x
}
if (m.right().IsNaN()) { // x - NaN => NaN
// Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.right().Value() - m.right().Value());
+ return ReplaceFloat64(m.right().ResolvedValue() -
+ m.right().ResolvedValue());
}
if (m.left().IsNaN()) { // NaN - x => NaN
// Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.left().Value() - m.left().Value());
+ return ReplaceFloat64(m.left().ResolvedValue() -
+ m.left().ResolvedValue());
}
if (m.IsFoldable()) { // L - R => (L - R)
- return ReplaceFloat64(m.left().Value() - m.right().Value());
+ return ReplaceFloat64(m.left().ResolvedValue() -
+ m.right().ResolvedValue());
}
if (allow_signalling_nan_ && m.left().IsMinusZero()) {
// -0.0 - round_down(-0.0 - R) => round_up(R)
@@ -543,10 +556,12 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
if (m.right().IsNaN()) { // x * NaN => NaN
// Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.right().Value() - m.right().Value());
+ return ReplaceFloat64(m.right().ResolvedValue() -
+ m.right().ResolvedValue());
}
if (m.IsFoldable()) { // K * K => K (K stands for arbitrary constants)
- return ReplaceFloat64(m.left().Value() * m.right().Value());
+ return ReplaceFloat64(m.left().ResolvedValue() *
+ m.right().ResolvedValue());
}
if (m.right().Is(2)) { // x * 2.0 => x + x
node->ReplaceInput(1, m.left().node());
@@ -562,15 +577,17 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
// TODO(ahaas): We could do x / 1.0 = x if we knew that x is not an sNaN.
if (m.right().IsNaN()) { // x / NaN => NaN
// Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.right().Value() - m.right().Value());
+ return ReplaceFloat64(m.right().ResolvedValue() -
+ m.right().ResolvedValue());
}
if (m.left().IsNaN()) { // NaN / x => NaN
// Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.left().Value() - m.left().Value());
+ return ReplaceFloat64(m.left().ResolvedValue() -
+ m.left().ResolvedValue());
}
if (m.IsFoldable()) { // K / K => K (K stands for arbitrary constants)
return ReplaceFloat64(
- base::Divide(m.left().Value(), m.right().Value()));
+ base::Divide(m.left().ResolvedValue(), m.right().ResolvedValue()));
}
if (allow_signalling_nan_ && m.right().Is(-1)) { // x / -1.0 => -x
node->RemoveInput(1);
@@ -581,7 +598,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
// All reciprocals of non-denormal powers of two can be represented
// exactly, so division by power of two can be reduced to
// multiplication by reciprocal, with the same result.
- node->ReplaceInput(1, Float64Constant(1.0 / m.right().Value()));
+ node->ReplaceInput(1, Float64Constant(1.0 / m.right().ResolvedValue()));
NodeProperties::ChangeOp(node, machine()->Float64Mul());
return Changed(node);
}
@@ -599,38 +616,45 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Replace(m.left().node());
}
if (m.IsFoldable()) { // K % K => K (K stands for arbitrary constants)
- return ReplaceFloat64(Modulo(m.left().Value(), m.right().Value()));
+ return ReplaceFloat64(
+ Modulo(m.left().ResolvedValue(), m.right().ResolvedValue()));
}
break;
}
case IrOpcode::kFloat64Acos: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::acos(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::acos(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Acosh: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::acosh(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::acosh(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Asin: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::asin(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::asin(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Asinh: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::asinh(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::asinh(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Atan: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::atan(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::atan(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Atanh: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::atanh(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::atanh(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Atan2: {
@@ -642,61 +666,70 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Replace(m.left().node());
}
if (m.IsFoldable()) {
- return ReplaceFloat64(
- base::ieee754::atan2(m.left().Value(), m.right().Value()));
+ return ReplaceFloat64(base::ieee754::atan2(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
break;
}
case IrOpcode::kFloat64Cbrt: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::cbrt(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::cbrt(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Cos: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::cos(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::cos(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Cosh: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::cosh(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::cosh(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Exp: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::exp(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::exp(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Expm1: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::expm1(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::expm1(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Log: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::log(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::log(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Log1p: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::log1p(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::log1p(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Log10: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::log10(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::log10(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Log2: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::log2(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::log2(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Pow: {
Float64BinopMatcher m(node);
if (m.IsFoldable()) {
- return ReplaceFloat64(
- base::ieee754::pow(m.left().Value(), m.right().Value()));
+ return ReplaceFloat64(base::ieee754::pow(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
} else if (m.right().Is(0.0)) { // x ** +-0.0 => 1.0
return ReplaceFloat64(1.0);
} else if (m.right().Is(-2.0)) { // x ** -2.0 => 1 / (x * x)
@@ -722,87 +755,99 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Sin: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::sin(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::sin(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Sinh: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::sinh(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::sinh(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Tan: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::tan(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::tan(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Tanh: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::tanh(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::tanh(m.ResolvedValue()));
break;
}
case IrOpcode::kChangeFloat32ToFloat64: {
Float32Matcher m(node->InputAt(0));
- if (m.HasValue()) {
- if (!allow_signalling_nan_ && std::isnan(m.Value())) {
+ if (m.HasResolvedValue()) {
+ if (!allow_signalling_nan_ && std::isnan(m.ResolvedValue())) {
// Do some calculation to guarantee the value is a quiet NaN.
- return ReplaceFloat64(m.Value() + m.Value());
+ return ReplaceFloat64(m.ResolvedValue() + m.ResolvedValue());
}
- return ReplaceFloat64(m.Value());
+ return ReplaceFloat64(m.ResolvedValue());
}
break;
}
case IrOpcode::kChangeFloat64ToInt32: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(FastD2IChecked(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt32(FastD2IChecked(m.ResolvedValue()));
if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kChangeFloat64ToInt64: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt64(static_cast<int64_t>(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt64(static_cast<int64_t>(m.ResolvedValue()));
if (m.IsChangeInt64ToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kChangeFloat64ToUint32: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(FastD2UI(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt32(FastD2UI(m.ResolvedValue()));
if (m.IsChangeUint32ToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kChangeInt32ToFloat64: {
Int32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(FastI2D(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(FastI2D(m.ResolvedValue()));
break;
}
case IrOpcode::kBitcastWord32ToWord64: {
Int32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt64(m.Value());
+ if (m.HasResolvedValue()) return ReplaceInt64(m.ResolvedValue());
break;
}
case IrOpcode::kChangeInt32ToInt64: {
Int32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt64(m.Value());
+ if (m.HasResolvedValue()) return ReplaceInt64(m.ResolvedValue());
break;
}
case IrOpcode::kChangeInt64ToFloat64: {
Int64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(static_cast<double>(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(static_cast<double>(m.ResolvedValue()));
if (m.IsChangeFloat64ToInt64()) return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kChangeUint32ToFloat64: {
Uint32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(FastUI2D(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(FastUI2D(m.ResolvedValue()));
break;
}
case IrOpcode::kChangeUint32ToUint64: {
Uint32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt64(static_cast<uint64_t>(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt64(static_cast<uint64_t>(m.ResolvedValue()));
break;
}
case IrOpcode::kTruncateFloat64ToWord32: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt32(DoubleToInt32(m.ResolvedValue()));
if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
return NoChange();
}
@@ -810,12 +855,13 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReduceTruncateInt64ToInt32(node);
case IrOpcode::kTruncateFloat64ToFloat32: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) {
- if (!allow_signalling_nan_ && std::isnan(m.Value())) {
+ if (m.HasResolvedValue()) {
+ if (!allow_signalling_nan_ && std::isnan(m.ResolvedValue())) {
// Do some calculation to guarantee the value is a quiet NaN.
- return ReplaceFloat32(DoubleToFloat32(m.Value() + m.Value()));
+ return ReplaceFloat32(
+ DoubleToFloat32(m.ResolvedValue() + m.ResolvedValue()));
}
- return ReplaceFloat32(DoubleToFloat32(m.Value()));
+ return ReplaceFloat32(DoubleToFloat32(m.ResolvedValue()));
}
if (allow_signalling_nan_ && m.IsChangeFloat32ToFloat64())
return Replace(m.node()->InputAt(0));
@@ -823,8 +869,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kRoundFloat64ToInt32: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) {
- return ReplaceInt32(DoubleToInt32(m.Value()));
+ if (m.HasResolvedValue()) {
+ return ReplaceInt32(DoubleToInt32(m.ResolvedValue()));
}
if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
break;
@@ -860,28 +906,32 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kInt64LessThan: {
Int64BinopMatcher m(node);
if (m.IsFoldable()) { // K < K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() < m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <
+ m.right().ResolvedValue());
}
return ReduceWord64Comparisons(node);
}
case IrOpcode::kInt64LessThanOrEqual: {
Int64BinopMatcher m(node);
if (m.IsFoldable()) { // K <= K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() <= m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <=
+ m.right().ResolvedValue());
}
return ReduceWord64Comparisons(node);
}
case IrOpcode::kUint64LessThan: {
Uint64BinopMatcher m(node);
if (m.IsFoldable()) { // K < K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() < m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <
+ m.right().ResolvedValue());
}
return ReduceWord64Comparisons(node);
}
case IrOpcode::kUint64LessThanOrEqual: {
Uint64BinopMatcher m(node);
if (m.IsFoldable()) { // K <= K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() <= m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <=
+ m.right().ResolvedValue());
}
return ReduceWord64Comparisons(node);
}
@@ -893,7 +943,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
Reduction MachineOperatorReducer::ReduceTruncateInt64ToInt32(Node* node) {
Int64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt32(static_cast<int32_t>(m.ResolvedValue()));
if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
return NoChange();
}
@@ -903,8 +954,8 @@ Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x + 0 => x
if (m.IsFoldable()) { // K + K => K (K stands for arbitrary constants)
- return ReplaceInt32(
- base::AddWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt32(base::AddWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.left().IsInt32Sub()) {
Int32BinopMatcher mleft(m.left().node());
@@ -924,11 +975,12 @@ Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
}
}
// (x + Int32Constant(a)) + Int32Constant(b) => x + Int32Constant(a + b)
- if (m.right().HasValue() && m.left().IsInt32Add()) {
+ if (m.right().HasResolvedValue() && m.left().IsInt32Add()) {
Int32BinopMatcher n(m.left().node());
- if (n.right().HasValue() && m.OwnsInput(m.left().node())) {
- node->ReplaceInput(1, Int32Constant(base::AddWithWraparound(
- m.right().Value(), n.right().Value())));
+ if (n.right().HasResolvedValue() && m.OwnsInput(m.left().node())) {
+ node->ReplaceInput(
+ 1, Int32Constant(base::AddWithWraparound(m.right().ResolvedValue(),
+ n.right().ResolvedValue())));
node->ReplaceInput(0, n.left().node());
return Changed(node);
}
@@ -942,15 +994,16 @@ Reduction MachineOperatorReducer::ReduceInt64Add(Node* node) {
Int64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x + 0 => x
if (m.IsFoldable()) {
- return ReplaceInt64(
- base::AddWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt64(base::AddWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
// (x + Int64Constant(a)) + Int64Constant(b) => x + Int64Constant(a + b)
- if (m.right().HasValue() && m.left().IsInt64Add()) {
+ if (m.right().HasResolvedValue() && m.left().IsInt64Add()) {
Int64BinopMatcher n(m.left().node());
- if (n.right().HasValue() && m.OwnsInput(m.left().node())) {
- node->ReplaceInput(1, Int64Constant(base::AddWithWraparound(
- m.right().Value(), n.right().Value())));
+ if (n.right().HasResolvedValue() && m.OwnsInput(m.left().node())) {
+ node->ReplaceInput(
+ 1, Int64Constant(base::AddWithWraparound(m.right().ResolvedValue(),
+ n.right().ResolvedValue())));
node->ReplaceInput(0, n.left().node());
return Changed(node);
}
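
// Illustrative sketch only, not V8 code: the constant folds above call
// base::AddWithWraparound / SubWithWraparound / MulWithWraparound. One way to
// get the same two's-complement wraparound without the undefined behaviour of
// direct signed overflow is to do the arithmetic on the unsigned type and
// narrow back (a conversion that C++20 defines to be modular):
#include <cassert>
#include <cstdint>

int32_t AddWithWraparoundSketch(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) +
                              static_cast<uint32_t>(b));
}

int main() {
  // INT32_MAX + 1 wraps around to INT32_MIN instead of being undefined.
  assert(AddWithWraparoundSketch(INT32_MAX, 1) == INT32_MIN);
  assert(AddWithWraparoundSketch(-5, 3) == -2);
}
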
@@ -963,13 +1016,14 @@ Reduction MachineOperatorReducer::ReduceInt32Sub(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
if (m.IsFoldable()) { // K - K => K (K stands for arbitrary constants)
- return ReplaceInt32(
- base::SubWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt32(base::SubWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.LeftEqualsRight()) return ReplaceInt32(0); // x - x => 0
- if (m.right().HasValue()) { // x - K => x + -K
+ if (m.right().HasResolvedValue()) { // x - K => x + -K
node->ReplaceInput(
- 1, Int32Constant(base::NegateWithWraparound(m.right().Value())));
+ 1,
+ Int32Constant(base::NegateWithWraparound(m.right().ResolvedValue())));
NodeProperties::ChangeOp(node, machine()->Int32Add());
return Changed(node).FollowedBy(ReduceInt32Add(node));
}
@@ -981,13 +1035,14 @@ Reduction MachineOperatorReducer::ReduceInt64Sub(Node* node) {
Int64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
if (m.IsFoldable()) { // K - K => K (K stands for arbitrary constants)
- return ReplaceInt64(
- base::SubWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt64(base::SubWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.LeftEqualsRight()) return Replace(Int64Constant(0)); // x - x => 0
- if (m.right().HasValue()) { // x - K => x + -K
+ if (m.right().HasResolvedValue()) { // x - K => x + -K
node->ReplaceInput(
- 1, Int64Constant(base::NegateWithWraparound(m.right().Value())));
+ 1,
+ Int64Constant(base::NegateWithWraparound(m.right().ResolvedValue())));
NodeProperties::ChangeOp(node, machine()->Int64Add());
return Changed(node).FollowedBy(ReduceInt64Add(node));
}
@@ -1000,8 +1055,8 @@ Reduction MachineOperatorReducer::ReduceInt64Mul(Node* node) {
if (m.right().Is(0)) return Replace(m.right().node()); // x * 0 => 0
if (m.right().Is(1)) return Replace(m.left().node()); // x * 1 => x
if (m.IsFoldable()) { // K * K => K (K stands for arbitrary constants)
- return ReplaceInt64(
- base::MulWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt64(base::MulWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.right().Is(-1)) { // x * -1 => 0 - x
node->ReplaceInput(0, Int64Constant(0));
@@ -1011,16 +1066,18 @@ Reduction MachineOperatorReducer::ReduceInt64Mul(Node* node) {
}
if (m.right().IsPowerOf2()) { // x * 2^n => x << n
node->ReplaceInput(
- 1, Int64Constant(base::bits::WhichPowerOfTwo(m.right().Value())));
+ 1,
+ Int64Constant(base::bits::WhichPowerOfTwo(m.right().ResolvedValue())));
NodeProperties::ChangeOp(node, machine()->Word64Shl());
return Changed(node).FollowedBy(ReduceWord64Shl(node));
}
// (x * Int64Constant(a)) * Int64Constant(b) => x * Int64Constant(a * b)
- if (m.right().HasValue() && m.left().IsInt64Mul()) {
+ if (m.right().HasResolvedValue() && m.left().IsInt64Mul()) {
Int64BinopMatcher n(m.left().node());
- if (n.right().HasValue() && m.OwnsInput(m.left().node())) {
- node->ReplaceInput(1, Int64Constant(base::MulWithWraparound(
- m.right().Value(), n.right().Value())));
+ if (n.right().HasResolvedValue() && m.OwnsInput(m.left().node())) {
+ node->ReplaceInput(
+ 1, Int64Constant(base::MulWithWraparound(m.right().ResolvedValue(),
+ n.right().ResolvedValue())));
node->ReplaceInput(0, n.left().node());
return Changed(node);
}
@@ -1034,8 +1091,8 @@ Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
if (m.right().Is(0)) return Replace(m.right().node()); // x / 0 => 0
if (m.right().Is(1)) return Replace(m.left().node()); // x / 1 => x
if (m.IsFoldable()) { // K / K => K (K stands for arbitrary constants)
- return ReplaceInt32(
- base::bits::SignedDiv32(m.left().Value(), m.right().Value()));
+ return ReplaceInt32(base::bits::SignedDiv32(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.LeftEqualsRight()) { // x / x => x != 0
Node* const zero = Int32Constant(0);
@@ -1048,8 +1105,8 @@ Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
NodeProperties::ChangeOp(node, machine()->Int32Sub());
return Changed(node);
}
- if (m.right().HasValue()) {
- int32_t const divisor = m.right().Value();
+ if (m.right().HasResolvedValue()) {
+ int32_t const divisor = m.right().ResolvedValue();
Node* const dividend = m.left().node();
Node* quotient = dividend;
if (base::bits::IsPowerOfTwo(Abs(divisor))) {
@@ -1081,19 +1138,19 @@ Reduction MachineOperatorReducer::ReduceUint32Div(Node* node) {
if (m.right().Is(0)) return Replace(m.right().node()); // x / 0 => 0
if (m.right().Is(1)) return Replace(m.left().node()); // x / 1 => x
if (m.IsFoldable()) { // K / K => K (K stands for arbitrary constants)
- return ReplaceUint32(
- base::bits::UnsignedDiv32(m.left().Value(), m.right().Value()));
+ return ReplaceUint32(base::bits::UnsignedDiv32(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.LeftEqualsRight()) { // x / x => x != 0
Node* const zero = Int32Constant(0);
return Replace(Word32Equal(Word32Equal(m.left().node(), zero), zero));
}
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
Node* const dividend = m.left().node();
- uint32_t const divisor = m.right().Value();
+ uint32_t const divisor = m.right().ResolvedValue();
if (base::bits::IsPowerOfTwo(divisor)) { // x / 2^n => x >> n
- node->ReplaceInput(
- 1, Uint32Constant(base::bits::WhichPowerOfTwo(m.right().Value())));
+ node->ReplaceInput(1, Uint32Constant(base::bits::WhichPowerOfTwo(
+ m.right().ResolvedValue())));
node->TrimInputCount(2);
NodeProperties::ChangeOp(node, machine()->Word32Shr());
return Changed(node);
@@ -1112,12 +1169,12 @@ Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
if (m.right().Is(-1)) return ReplaceInt32(0); // x % -1 => 0
if (m.LeftEqualsRight()) return ReplaceInt32(0); // x % x => 0
if (m.IsFoldable()) { // K % K => K (K stands for arbitrary constants)
- return ReplaceInt32(
- base::bits::SignedMod32(m.left().Value(), m.right().Value()));
+ return ReplaceInt32(base::bits::SignedMod32(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
Node* const dividend = m.left().node();
- uint32_t const divisor = Abs(m.right().Value());
+ uint32_t const divisor = Abs(m.right().ResolvedValue());
if (base::bits::IsPowerOfTwo(divisor)) {
uint32_t const mask = divisor - 1;
Node* const zero = Int32Constant(0);
@@ -1147,14 +1204,14 @@ Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
if (m.right().Is(1)) return ReplaceUint32(0); // x % 1 => 0
if (m.LeftEqualsRight()) return ReplaceInt32(0); // x % x => 0
if (m.IsFoldable()) { // K % K => K (K stands for arbitrary constants)
- return ReplaceUint32(
- base::bits::UnsignedMod32(m.left().Value(), m.right().Value()));
+ return ReplaceUint32(base::bits::UnsignedMod32(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
Node* const dividend = m.left().node();
- uint32_t const divisor = m.right().Value();
+ uint32_t const divisor = m.right().ResolvedValue();
if (base::bits::IsPowerOfTwo(divisor)) { // x % 2^n => x & 2^n-1
- node->ReplaceInput(1, Uint32Constant(m.right().Value() - 1));
+ node->ReplaceInput(1, Uint32Constant(m.right().ResolvedValue() - 1));
node->TrimInputCount(2);
NodeProperties::ChangeOp(node, machine()->Word32And());
} else {
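
// Illustrative sketch only, not V8 code: the Uint32Div/Uint32Mod reductions
// above rewrite x / 2^n as x >> n and x % 2^n as x & (2^n - 1). V8 uses its
// base::bits helpers; the same checks map onto std::has_single_bit and
// std::countr_zero from C++20 <bit>:
#include <bit>
#include <cassert>
#include <cstdint>

uint32_t DivByPow2(uint32_t x, uint32_t d) {
  assert(std::has_single_bit(d));      // d == 2^n
  return x >> std::countr_zero(d);     // x / 2^n => x >> n
}

uint32_t ModByPow2(uint32_t x, uint32_t d) {
  assert(std::has_single_bit(d));
  return x & (d - 1);                  // x % 2^n => x & (2^n - 1)
}

int main() {
  for (uint32_t x : {0u, 1u, 7u, 100u, 0xFFFFFFFFu}) {
    assert(DivByPow2(x, 8u) == x / 8u);
    assert(ModByPow2(x, 8u) == x % 8u);
  }
}
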
@@ -1187,10 +1244,11 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
switch (value->opcode()) {
case IrOpcode::kWord32And: {
Uint32BinopMatcher m(value);
- if (m.right().HasValue() && ((rep == MachineRepresentation::kWord8 &&
- (m.right().Value() & 0xFF) == 0xFF) ||
- (rep == MachineRepresentation::kWord16 &&
- (m.right().Value() & 0xFFFF) == 0xFFFF))) {
+ if (m.right().HasResolvedValue() &&
+ ((rep == MachineRepresentation::kWord8 &&
+ (m.right().ResolvedValue() & 0xFF) == 0xFF) ||
+ (rep == MachineRepresentation::kWord16 &&
+ (m.right().ResolvedValue() & 0xFFFF) == 0xFFFF))) {
node->ReplaceInput(value_input, m.left().node());
return Changed(node);
}
@@ -1203,7 +1261,7 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
(rep == MachineRepresentation::kWord16 &&
m.right().IsInRange(1, 16)))) {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().Is(m.right().Value())) {
+ if (mleft.right().Is(m.right().ResolvedValue())) {
node->ReplaceInput(value_input, mleft.left().node());
return Changed(node);
}
@@ -1223,8 +1281,8 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
Int32BinopMatcher m(node);
if (m.IsFoldable()) {
int32_t val;
- bool ovf = base::bits::SignedAddOverflow32(m.left().Value(),
- m.right().Value(), &val);
+ bool ovf = base::bits::SignedAddOverflow32(
+ m.left().ResolvedValue(), m.right().ResolvedValue(), &val);
return ReplaceInt32(index == 0 ? val : ovf);
}
if (m.right().Is(0)) {
@@ -1237,8 +1295,8 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
Int32BinopMatcher m(node);
if (m.IsFoldable()) {
int32_t val;
- bool ovf = base::bits::SignedSubOverflow32(m.left().Value(),
- m.right().Value(), &val);
+ bool ovf = base::bits::SignedSubOverflow32(
+ m.left().ResolvedValue(), m.right().ResolvedValue(), &val);
return ReplaceInt32(index == 0 ? val : ovf);
}
if (m.right().Is(0)) {
@@ -1251,8 +1309,8 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
Int32BinopMatcher m(node);
if (m.IsFoldable()) {
int32_t val;
- bool ovf = base::bits::SignedMulOverflow32(m.left().Value(),
- m.right().Value(), &val);
+ bool ovf = base::bits::SignedMulOverflow32(
+ m.left().ResolvedValue(), m.right().ResolvedValue(), &val);
return ReplaceInt32(index == 0 ? val : ovf);
}
if (m.right().Is(0)) {
@@ -1280,7 +1338,8 @@ Reduction MachineOperatorReducer::ReduceWord32Comparisons(Node* node) {
m.right().op() == machine()->Word32SarShiftOutZeros()) {
Int32BinopMatcher mleft(m.left().node());
Int32BinopMatcher mright(m.right().node());
- if (mleft.right().HasValue() && mright.right().Is(mleft.right().Value())) {
+ if (mleft.right().HasResolvedValue() &&
+ mright.right().Is(mleft.right().ResolvedValue())) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1, mright.left().node());
return Changed(node);
@@ -1331,7 +1390,8 @@ Reduction MachineOperatorReducer::ReduceWord64Comparisons(Node* node) {
m.right().op() == machine()->Word64SarShiftOutZeros()) {
Int64BinopMatcher mleft(m.left().node());
Int64BinopMatcher mright(m.right().node());
- if (mleft.right().HasValue() && mright.right().Is(mleft.right().Value())) {
+ if (mleft.right().HasResolvedValue() &&
+ mright.right().Is(mleft.right().ResolvedValue())) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1, mright.left().node());
return Changed(node);
@@ -1365,8 +1425,8 @@ Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x << 0 => x
if (m.IsFoldable()) { // K << K => K (K stands for arbitrary constants)
- return ReplaceInt32(
- base::ShlWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt32(base::ShlWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.right().IsInRange(1, 31)) {
if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
@@ -1381,8 +1441,8 @@ Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
if (mleft.op() == machine()->Word32SarShiftOutZeros() &&
mleft.right().IsInRange(1, 31)) {
Node* x = mleft.left().node();
- int k = mleft.right().Value();
- int l = m.right().Value();
+ int k = mleft.right().ResolvedValue();
+ int l = m.right().ResolvedValue();
if (k == l) {
return Replace(x);
} else if (k > l) {
@@ -1400,11 +1460,11 @@ Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
// (x >>> K) << K => x & ~(2^K - 1)
// (x >> K) << K => x & ~(2^K - 1)
- if (mleft.right().Is(m.right().Value())) {
+ if (mleft.right().Is(m.right().ResolvedValue())) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1,
Uint32Constant(std::numeric_limits<uint32_t>::max()
- << m.right().Value()));
+ << m.right().ResolvedValue()));
NodeProperties::ChangeOp(node, machine()->Word32And());
return Changed(node).FollowedBy(ReduceWord32And(node));
}
@@ -1418,8 +1478,8 @@ Reduction MachineOperatorReducer::ReduceWord64Shl(Node* node) {
Int64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x << 0 => x
if (m.IsFoldable()) { // K << K => K (K stands for arbitrary constants)
- return ReplaceInt64(
- base::ShlWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt64(base::ShlWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.right().IsInRange(1, 63) &&
(m.left().IsWord64Sar() || m.left().IsWord64Shr())) {
@@ -1434,8 +1494,8 @@ Reduction MachineOperatorReducer::ReduceWord64Shl(Node* node) {
if (mleft.op() == machine()->Word64SarShiftOutZeros() &&
mleft.right().IsInRange(1, 63)) {
Node* x = mleft.left().node();
- int64_t k = mleft.right().Value();
- int64_t l = m.right().Value();
+ int64_t k = mleft.right().ResolvedValue();
+ int64_t l = m.right().ResolvedValue();
if (k == l) {
return Replace(x);
} else if (k > l) {
@@ -1453,10 +1513,10 @@ Reduction MachineOperatorReducer::ReduceWord64Shl(Node* node) {
// (x >>> K) << K => x & ~(2^K - 1)
// (x >> K) << K => x & ~(2^K - 1)
- if (mleft.right().Is(m.right().Value())) {
+ if (mleft.right().Is(m.right().ResolvedValue())) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1, Uint64Constant(std::numeric_limits<uint64_t>::max()
- << m.right().Value()));
+ << m.right().ResolvedValue()));
NodeProperties::ChangeOp(node, machine()->Word64And());
return Changed(node).FollowedBy(ReduceWord64And(node));
}
@@ -1468,13 +1528,14 @@ Reduction MachineOperatorReducer::ReduceWord32Shr(Node* node) {
Uint32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
if (m.IsFoldable()) { // K >>> K => K (K stands for arbitrary constants)
- return ReplaceInt32(m.left().Value() >> (m.right().Value() & 31));
+ return ReplaceInt32(m.left().ResolvedValue() >>
+ (m.right().ResolvedValue() & 31));
}
- if (m.left().IsWord32And() && m.right().HasValue()) {
+ if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
Uint32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
- uint32_t shift = m.right().Value() & 31;
- uint32_t mask = mleft.right().Value();
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t shift = m.right().ResolvedValue() & 31;
+ uint32_t mask = mleft.right().ResolvedValue();
if ((mask >> shift) == 0) {
// (m >>> s) == 0 implies ((x & m) >>> s) == 0
return ReplaceInt32(0);
@@ -1489,7 +1550,8 @@ Reduction MachineOperatorReducer::ReduceWord64Shr(Node* node) {
Uint64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
if (m.IsFoldable()) { // K >> K => K (K stands for arbitrary constants)
- return ReplaceInt64(m.left().Value() >> (m.right().Value() & 63));
+ return ReplaceInt64(m.left().ResolvedValue() >>
+ (m.right().ResolvedValue() & 63));
}
return NoChange();
}
@@ -1498,7 +1560,8 @@ Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x
if (m.IsFoldable()) { // K >> K => K (K stands for arbitrary constants)
- return ReplaceInt32(m.left().Value() >> (m.right().Value() & 31));
+ return ReplaceInt32(m.left().ResolvedValue() >>
+ (m.right().ResolvedValue() & 31));
}
if (m.left().IsWord32Shl()) {
Int32BinopMatcher mleft(m.left().node());
@@ -1532,7 +1595,8 @@ Reduction MachineOperatorReducer::ReduceWord64Sar(Node* node) {
Int64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x
if (m.IsFoldable()) {
- return ReplaceInt64(m.left().Value() >> (m.right().Value() & 63));
+ return ReplaceInt64(m.left().ResolvedValue() >>
+ (m.right().ResolvedValue() & 63));
}
return NoChange();
}
@@ -1549,33 +1613,34 @@ Reduction MachineOperatorReducer::ReduceWordNAnd(Node* node) {
return Replace(m.left().node());
}
if (m.IsFoldable()) { // K & K => K (K stands for arbitrary constants)
- return a.ReplaceIntN(m.left().Value() & m.right().Value());
+ return a.ReplaceIntN(m.left().ResolvedValue() & m.right().ResolvedValue());
}
if (m.LeftEqualsRight()) return Replace(m.left().node()); // x & x => x
- if (A::IsWordNAnd(m.left()) && m.right().HasValue()) {
+ if (A::IsWordNAnd(m.left()) && m.right().HasResolvedValue()) {
typename A::IntNBinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) { // (x & K) & K => x & K
+ if (mleft.right().HasResolvedValue()) { // (x & K) & K => x & K
node->ReplaceInput(0, mleft.left().node());
- node->ReplaceInput(
- 1, a.IntNConstant(m.right().Value() & mleft.right().Value()));
+ node->ReplaceInput(1, a.IntNConstant(m.right().ResolvedValue() &
+ mleft.right().ResolvedValue()));
return Changed(node).FollowedBy(a.ReduceWordNAnd(node));
}
}
if (m.right().IsNegativePowerOf2()) {
- typename A::intN_t const mask = m.right().Value();
+ typename A::intN_t const mask = m.right().ResolvedValue();
typename A::intN_t const neg_mask = base::NegateWithWraparound(mask);
if (A::IsWordNShl(m.left())) {
typename A::UintNBinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() &&
- (mleft.right().Value() & (A::WORD_SIZE - 1)) >=
+ if (mleft.right().HasResolvedValue() &&
+ (mleft.right().ResolvedValue() & (A::WORD_SIZE - 1)) >=
base::bits::CountTrailingZeros(mask)) {
// (x << L) & (-1 << K) => x << L iff L >= K
return Replace(mleft.node());
}
} else if (A::IsIntNAdd(m.left())) {
typename A::IntNBinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() &&
- (mleft.right().Value() & mask) == mleft.right().Value()) {
+ if (mleft.right().HasResolvedValue() &&
+ (mleft.right().ResolvedValue() & mask) ==
+ mleft.right().ResolvedValue()) {
// (x + (K << L)) & (-1 << L) => (x & (-1 << L)) + (K << L)
node->ReplaceInput(0,
a.WordNAnd(mleft.left().node(), m.right().node()));
@@ -1659,9 +1724,9 @@ struct BitfieldCheck {
Uint32BinopMatcher eq(node);
if (eq.left().IsWord32And()) {
Uint32BinopMatcher mand(eq.left().node());
- if (mand.right().HasValue() && eq.right().HasValue()) {
- BitfieldCheck result{mand.left().node(), mand.right().Value(),
- eq.right().Value(), false};
+ if (mand.right().HasResolvedValue() && eq.right().HasResolvedValue()) {
+ BitfieldCheck result{mand.left().node(), mand.right().ResolvedValue(),
+ eq.right().ResolvedValue(), false};
if (mand.left().IsTruncateInt64ToInt32()) {
result.truncate_from_64_bit = true;
result.source =
@@ -1703,12 +1768,14 @@ struct BitfieldCheck {
// Look for the pattern `(val >> shift) & 1`. The shift may be omitted.
if (WordNAdapter::IsWordNAnd(NodeMatcher(node))) {
typename WordNAdapter::IntNBinopMatcher mand(node);
- if (mand.right().HasValue() && mand.right().Value() == 1) {
+ if (mand.right().HasResolvedValue() &&
+ mand.right().ResolvedValue() == 1) {
if (WordNAdapter::IsWordNShr(mand.left()) ||
WordNAdapter::IsWordNSar(mand.left())) {
typename WordNAdapter::UintNBinopMatcher shift(mand.left().node());
- if (shift.right().HasValue() && shift.right().Value() < 32u) {
- uint32_t mask = 1 << shift.right().Value();
+ if (shift.right().HasResolvedValue() &&
+ shift.right().ResolvedValue() < 32u) {
+ uint32_t mask = 1 << shift.right().ResolvedValue();
return BitfieldCheck{shift.left().node(), mask, mask,
WordNAdapter::WORD_SIZE == 64};
}
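
// Illustrative sketch only, not V8 code: the BitfieldCheck pattern above
// recognises `(val >> shift) & 1` and records it as a mask test with
// mask = 1 << shift. The two forms agree for every shift below the word size:
#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t val : {0u, 1u, 0x80000000u, 0xDEADBEEFu}) {
    for (uint32_t shift = 0; shift < 32; ++shift) {
      uint32_t mask = uint32_t{1} << shift;
      bool via_shift = ((val >> shift) & 1u) != 0;
      bool via_mask = (val & mask) == mask;
      assert(via_shift == via_mask);
    }
  }
}
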
@@ -1782,9 +1849,10 @@ Reduction MachineOperatorReducer::TryMatchWord32Ror(Node* node) {
Int32BinopMatcher mshr(shr);
if (mshl.left().node() != mshr.left().node()) return NoChange();
- if (mshl.right().HasValue() && mshr.right().HasValue()) {
+ if (mshl.right().HasResolvedValue() && mshr.right().HasResolvedValue()) {
// Case where y is a constant.
- if (mshl.right().Value() + mshr.right().Value() != 32) return NoChange();
+ if (mshl.right().ResolvedValue() + mshr.right().ResolvedValue() != 32)
+ return NoChange();
} else {
Node* sub = nullptr;
Node* y = nullptr;
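
// Illustrative sketch only, not V8 code: TryMatchWord32Ror above combines
// (x << y) | (x >>> (32 - y)) into a single rotate when the two shift
// constants sum to 32. For a constant y this is exactly std::rotl from
// C++20 <bit>:
#include <bit>
#include <cassert>
#include <cstdint>

uint32_t RotlViaShifts(uint32_t x, unsigned y) {
  assert(y > 0 && y < 32);  // mirrors the "shifts sum to 32" side condition
  return (x << y) | (x >> (32 - y));
}

int main() {
  for (unsigned y = 1; y < 32; ++y) {
    assert(RotlViaShifts(0xDEADBEEFu, y) ==
           std::rotl(0xDEADBEEFu, static_cast<int>(y)));
  }
}
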
@@ -1817,17 +1885,17 @@ Reduction MachineOperatorReducer::ReduceWordNOr(Node* node) {
if (m.right().Is(0)) return Replace(m.left().node()); // x | 0 => x
if (m.right().Is(-1)) return Replace(m.right().node()); // x | -1 => -1
if (m.IsFoldable()) { // K | K => K (K stands for arbitrary constants)
- return a.ReplaceIntN(m.left().Value() | m.right().Value());
+ return a.ReplaceIntN(m.left().ResolvedValue() | m.right().ResolvedValue());
}
if (m.LeftEqualsRight()) return Replace(m.left().node()); // x | x => x
// (x & K1) | K2 => x | K2 if K2 has ones for every zero bit in K1.
// This case can be constructed by UpdateWord and UpdateWord32 in CSA.
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
if (A::IsWordNAnd(m.left())) {
typename A::IntNBinopMatcher mand(m.left().node());
- if (mand.right().HasValue()) {
- if ((m.right().Value() | mand.right().Value()) == -1) {
+ if (mand.right().HasResolvedValue()) {
+ if ((m.right().ResolvedValue() | mand.right().ResolvedValue()) == -1) {
node->ReplaceInput(0, mand.left().node());
return Changed(node);
}
@@ -1856,7 +1924,7 @@ Reduction MachineOperatorReducer::ReduceWordNXor(Node* node) {
typename A::IntNBinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x ^ 0 => x
if (m.IsFoldable()) { // K ^ K => K (K stands for arbitrary constants)
- return a.ReplaceIntN(m.left().Value() ^ m.right().Value());
+ return a.ReplaceIntN(m.left().ResolvedValue() ^ m.right().ResolvedValue());
}
if (m.LeftEqualsRight()) return ReplaceInt32(0); // x ^ x => 0
if (A::IsWordNXor(m.left()) && m.right().Is(-1)) {
@@ -1882,7 +1950,7 @@ Reduction MachineOperatorReducer::ReduceWord64Xor(Node* node) {
Reduction MachineOperatorReducer::ReduceWord32Equal(Node* node) {
Int32BinopMatcher m(node);
if (m.IsFoldable()) { // K == K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() == m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() == m.right().ResolvedValue());
}
if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y == 0 => x == y
Int32BinopMatcher msub(m.left().node());
@@ -1892,15 +1960,15 @@ Reduction MachineOperatorReducer::ReduceWord32Equal(Node* node) {
}
// TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
if (m.LeftEqualsRight()) return ReplaceBool(true); // x == x => true
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
base::Optional<std::pair<Node*, uint32_t>> replacements;
if (m.left().IsTruncateInt64ToInt32()) {
replacements = ReduceWord32EqualForConstantRhs<Word64Adapter>(
NodeProperties::GetValueInput(m.left().node(), 0),
- static_cast<uint32_t>(m.right().Value()));
+ static_cast<uint32_t>(m.right().ResolvedValue()));
} else {
replacements = ReduceWord32EqualForConstantRhs<Word32Adapter>(
- m.left().node(), static_cast<uint32_t>(m.right().Value()));
+ m.left().node(), static_cast<uint32_t>(m.right().ResolvedValue()));
}
if (replacements) {
node->ReplaceInput(0, replacements->first);
@@ -1916,10 +1984,11 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) {
DCHECK_EQ(IrOpcode::kFloat64InsertLowWord32, node->opcode());
Float64Matcher mlhs(node->InputAt(0));
Uint32Matcher mrhs(node->InputAt(1));
- if (mlhs.HasValue() && mrhs.HasValue()) {
- return ReplaceFloat64(bit_cast<double>(
- (bit_cast<uint64_t>(mlhs.Value()) & uint64_t{0xFFFFFFFF00000000}) |
- mrhs.Value()));
+ if (mlhs.HasResolvedValue() && mrhs.HasResolvedValue()) {
+ return ReplaceFloat64(
+ bit_cast<double>((bit_cast<uint64_t>(mlhs.ResolvedValue()) &
+ uint64_t{0xFFFFFFFF00000000}) |
+ mrhs.ResolvedValue()));
}
return NoChange();
}
@@ -1928,10 +1997,10 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
DCHECK_EQ(IrOpcode::kFloat64InsertHighWord32, node->opcode());
Float64Matcher mlhs(node->InputAt(0));
Uint32Matcher mrhs(node->InputAt(1));
- if (mlhs.HasValue() && mrhs.HasValue()) {
+ if (mlhs.HasResolvedValue() && mrhs.HasResolvedValue()) {
return ReplaceFloat64(bit_cast<double>(
- (bit_cast<uint64_t>(mlhs.Value()) & uint64_t{0xFFFFFFFF}) |
- (static_cast<uint64_t>(mrhs.Value()) << 32)));
+ (bit_cast<uint64_t>(mlhs.ResolvedValue()) & uint64_t{0xFFFFFFFF}) |
+ (static_cast<uint64_t>(mrhs.ResolvedValue()) << 32)));
}
return NoChange();
}
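
// Illustrative sketch only, not V8 code: the Float64InsertLowWord32 /
// Float64InsertHighWord32 folds above splice a 32-bit word into one half of a
// double's bit pattern. With C++20 std::bit_cast the same constant fold looks
// like this:
#include <bit>
#include <cassert>
#include <cstdint>

double InsertLowWord32(double x, uint32_t lo) {
  uint64_t bits = std::bit_cast<uint64_t>(x);
  bits = (bits & uint64_t{0xFFFFFFFF00000000}) | lo;
  return std::bit_cast<double>(bits);
}

double InsertHighWord32(double x, uint32_t hi) {
  uint64_t bits = std::bit_cast<uint64_t>(x);
  bits = (bits & uint64_t{0x00000000FFFFFFFF}) | (uint64_t{hi} << 32);
  return std::bit_cast<double>(bits);
}

int main() {
  // 1.0 is 0x3FF0000000000000; setting the low word to 0 and the high word to
  // 0x40000000 yields the bit pattern of 2.0.
  assert(InsertHighWord32(InsertLowWord32(1.0, 0), 0x40000000u) == 2.0);
}
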
@@ -1939,8 +2008,8 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
namespace {
bool IsFloat64RepresentableAsFloat32(const Float64Matcher& m) {
- if (m.HasValue()) {
- double v = m.Value();
+ if (m.HasResolvedValue()) {
+ double v = m.ResolvedValue();
return DoubleToFloat32(v) == v;
}
return false;
@@ -1957,11 +2026,14 @@ Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
if (m.IsFoldable()) {
switch (node->opcode()) {
case IrOpcode::kFloat64Equal:
- return ReplaceBool(m.left().Value() == m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() ==
+ m.right().ResolvedValue());
case IrOpcode::kFloat64LessThan:
- return ReplaceBool(m.left().Value() < m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <
+ m.right().ResolvedValue());
case IrOpcode::kFloat64LessThanOrEqual:
- return ReplaceBool(m.left().Value() <= m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <=
+ m.right().ResolvedValue());
default:
UNREACHABLE();
}
@@ -1990,12 +2062,12 @@ Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
UNREACHABLE();
}
node->ReplaceInput(
- 0, m.left().HasValue()
- ? Float32Constant(static_cast<float>(m.left().Value()))
+ 0, m.left().HasResolvedValue()
+ ? Float32Constant(static_cast<float>(m.left().ResolvedValue()))
: m.left().InputAt(0));
node->ReplaceInput(
- 1, m.right().HasValue()
- ? Float32Constant(static_cast<float>(m.right().Value()))
+ 1, m.right().HasResolvedValue()
+ ? Float32Constant(static_cast<float>(m.right().ResolvedValue()))
: m.right().InputAt(0));
return Changed(node);
}
@@ -2005,8 +2077,8 @@ Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
Reduction MachineOperatorReducer::ReduceFloat64RoundDown(Node* node) {
DCHECK_EQ(IrOpcode::kFloat64RoundDown, node->opcode());
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) {
- return ReplaceFloat64(std::floor(m.Value()));
+ if (m.HasResolvedValue()) {
+ return ReplaceFloat64(std::floor(m.ResolvedValue()));
}
return NoChange();
}
@@ -2055,12 +2127,12 @@ MachineOperatorReducer::ReduceWord32EqualForConstantRhs(Node* lhs,
typename WordNAdapter::UintNBinopMatcher mand(lhs);
if ((WordNAdapter::IsWordNShr(mand.left()) ||
WordNAdapter::IsWordNSar(mand.left())) &&
- mand.right().HasValue()) {
+ mand.right().HasResolvedValue()) {
typename WordNAdapter::UintNBinopMatcher mshift(mand.left().node());
// ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
- if (mshift.right().HasValue()) {
- auto shift_bits = mshift.right().Value();
- auto mask = mand.right().Value();
+ if (mshift.right().HasResolvedValue()) {
+ auto shift_bits = mshift.right().ResolvedValue();
+ auto mask = mand.right().ResolvedValue();
// Make sure that we won't shift data off the end, and that all of the
// data ends up in the lower 32 bits for 64-bit mode.
if (shift_bits <= base::bits::CountLeadingZeros(mask) &&
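
// Illustrative sketch only, not V8 code: ReduceWord32EqualForConstantRhs above
// rewrites ((x >> K1) & K2) == K3 into (x & (K2 << K1)) == (K3 << K1), guarded
// so the shift cannot drop mask bits (K1 <= clz(K2), as checked just above).
// A brute-force check of that identity under the same side condition:
#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t vals[] = {0u, 1u, 0x12345678u, 0xA5A5A5A5u, 0xFFFFFFFFu};
  const uint32_t masks[] = {0x1u, 0xFFu, 0x0F0Fu, 0x0FFFFFFFu};
  for (uint32_t k2 : masks) {
    uint32_t max_shift = static_cast<uint32_t>(std::countl_zero(k2));
    for (uint32_t k1 = 0; k1 <= max_shift; ++k1) {
      for (uint32_t x : vals) {
        for (uint32_t v : vals) {
          uint32_t k3 = v & k2;  // any constant that lies within the mask
          bool lhs = ((x >> k1) & k2) == k3;
          bool rhs = (x & (k2 << k1)) == (k3 << k1);
          assert(lhs == rhs);
        }
      }
    }
  }
}
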
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 98befab060..e3d16d7e60 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -32,15 +32,15 @@ std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
return os << rep.representation() << ", " << rep.write_barrier_kind();
}
-size_t hash_value(LoadKind kind) { return static_cast<size_t>(kind); }
+size_t hash_value(MemoryAccessKind kind) { return static_cast<size_t>(kind); }
-std::ostream& operator<<(std::ostream& os, LoadKind kind) {
+std::ostream& operator<<(std::ostream& os, MemoryAccessKind kind) {
switch (kind) {
- case LoadKind::kNormal:
+ case MemoryAccessKind::kNormal:
return os << "kNormal";
- case LoadKind::kUnaligned:
+ case MemoryAccessKind::kUnaligned:
return os << "kUnaligned";
- case LoadKind::kProtected:
+ case MemoryAccessKind::kProtected:
return os << "kProtected";
}
UNREACHABLE();
@@ -50,30 +50,30 @@ size_t hash_value(LoadTransformation rep) { return static_cast<size_t>(rep); }
std::ostream& operator<<(std::ostream& os, LoadTransformation rep) {
switch (rep) {
- case LoadTransformation::kS8x16LoadSplat:
- return os << "kS8x16LoadSplat";
- case LoadTransformation::kS16x8LoadSplat:
- return os << "kS16x8LoadSplat";
- case LoadTransformation::kS32x4LoadSplat:
- return os << "kS32x4LoadSplat";
- case LoadTransformation::kS64x2LoadSplat:
- return os << "kS64x2LoadSplat";
- case LoadTransformation::kI16x8Load8x8S:
- return os << "kI16x8Load8x8S";
- case LoadTransformation::kI16x8Load8x8U:
- return os << "kI16x8Load8x8U";
- case LoadTransformation::kI32x4Load16x4S:
- return os << "kI32x4Load16x4S";
- case LoadTransformation::kI32x4Load16x4U:
- return os << "kI32x4Load16x4U";
- case LoadTransformation::kI64x2Load32x2S:
- return os << "kI64x2Load32x2S";
- case LoadTransformation::kI64x2Load32x2U:
- return os << "kI64x2Load32x2U";
- case LoadTransformation::kS128LoadMem32Zero:
- return os << "kS128LoadMem32Zero";
- case LoadTransformation::kS128LoadMem64Zero:
- return os << "kS128LoadMem64Zero";
+ case LoadTransformation::kS128Load8Splat:
+ return os << "kS128Load8Splat";
+ case LoadTransformation::kS128Load16Splat:
+ return os << "kS128Load16Splat";
+ case LoadTransformation::kS128Load32Splat:
+ return os << "kS128Load32Splat";
+ case LoadTransformation::kS128Load64Splat:
+ return os << "kS128Load64Splat";
+ case LoadTransformation::kS128Load8x8S:
+ return os << "kS128Load8x8S";
+ case LoadTransformation::kS128Load8x8U:
+ return os << "kS128Load8x8U";
+ case LoadTransformation::kS128Load16x4S:
+ return os << "kS128Load16x4S";
+ case LoadTransformation::kS128Load16x4U:
+ return os << "kS128Load16x4U";
+ case LoadTransformation::kS128Load32x2S:
+ return os << "kS128Load32x2S";
+ case LoadTransformation::kS128Load32x2U:
+ return os << "kS128Load32x2U";
+ case LoadTransformation::kS128Load32Zero:
+ return os << "kS128Load32Zero";
+ case LoadTransformation::kS128Load64Zero:
+ return os << "kS128Load64Zero";
}
UNREACHABLE();
}
@@ -99,6 +99,25 @@ bool operator!=(LoadTransformParameters lhs, LoadTransformParameters rhs) {
return !(lhs == rhs);
}
+size_t hash_value(LoadLaneParameters params) {
+ return base::hash_combine(params.kind, params.rep, params.laneidx);
+}
+
+std::ostream& operator<<(std::ostream& os, LoadLaneParameters params) {
+ return os << "(" << params.kind << " " << params.rep << " " << params.laneidx
+ << ")";
+}
+
+LoadLaneParameters const& LoadLaneParametersOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kLoadLane, op->opcode());
+ return OpParameter<LoadLaneParameters>(op);
+}
+
+bool operator==(LoadLaneParameters lhs, LoadLaneParameters rhs) {
+ return lhs.kind == rhs.kind && lhs.rep == rhs.rep &&
+ lhs.laneidx == rhs.laneidx;
+}
+
LoadRepresentation LoadRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kLoad == op->opcode() ||
IrOpcode::kProtectedLoad == op->opcode() ||
@@ -122,6 +141,25 @@ UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
return OpParameter<UnalignedStoreRepresentation>(op);
}
+size_t hash_value(StoreLaneParameters params) {
+ return base::hash_combine(params.kind, params.rep, params.laneidx);
+}
+
+std::ostream& operator<<(std::ostream& os, StoreLaneParameters params) {
+ return os << "(" << params.kind << " " << params.rep << " " << params.laneidx
+ << ")";
+}
+
+StoreLaneParameters const& StoreLaneParametersOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kStoreLane, op->opcode());
+ return OpParameter<StoreLaneParameters>(op);
+}
+
+bool operator==(StoreLaneParameters lhs, StoreLaneParameters rhs) {
+ return lhs.kind == rhs.kind && lhs.rep == rhs.rep &&
+ lhs.laneidx == rhs.laneidx;
+}
+
bool operator==(StackSlotRepresentation lhs, StackSlotRepresentation rhs) {
return lhs.size() == rhs.size() && lhs.alignment() == rhs.alignment();
}
@@ -375,22 +413,23 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \
V(I64x2SplatI32Pair, Operator::kNoProperties, 2, 0, 1) \
V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2SConvertI32x4Low, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2SConvertI32x4High, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2UConvertI32x4Low, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2UConvertI32x4High, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2BitMask, Operator::kNoProperties, 1, 0, 1) \
V(I64x2Shl, Operator::kNoProperties, 2, 0, 1) \
V(I64x2ShrS, Operator::kNoProperties, 2, 0, 1) \
V(I64x2Add, Operator::kCommutative, 2, 0, 1) \
V(I64x2Sub, Operator::kNoProperties, 2, 0, 1) \
V(I64x2Mul, Operator::kCommutative, 2, 0, 1) \
- V(I64x2MinS, Operator::kCommutative, 2, 0, 1) \
- V(I64x2MaxS, Operator::kCommutative, 2, 0, 1) \
V(I64x2Eq, Operator::kCommutative, 2, 0, 1) \
- V(I64x2Ne, Operator::kCommutative, 2, 0, 1) \
- V(I64x2GtS, Operator::kNoProperties, 2, 0, 1) \
- V(I64x2GeS, Operator::kNoProperties, 2, 0, 1) \
V(I64x2ShrU, Operator::kNoProperties, 2, 0, 1) \
- V(I64x2MinU, Operator::kCommutative, 2, 0, 1) \
- V(I64x2MaxU, Operator::kCommutative, 2, 0, 1) \
- V(I64x2GtU, Operator::kNoProperties, 2, 0, 1) \
- V(I64x2GeU, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2ExtMulLowI32x4S, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2ExtMulHighI32x4S, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2ExtMulLowI32x4U, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2ExtMulHighI32x4U, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2SignSelect, Operator::kNoProperties, 3, 0, 1) \
V(I32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(I32x4SConvertF32x4, Operator::kNoProperties, 1, 0, 1) \
V(I32x4SConvertI16x8Low, Operator::kNoProperties, 1, 0, 1) \
@@ -419,6 +458,13 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I32x4Abs, Operator::kNoProperties, 1, 0, 1) \
V(I32x4BitMask, Operator::kNoProperties, 1, 0, 1) \
V(I32x4DotI16x8S, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4ExtMulLowI16x8S, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4ExtMulHighI16x8S, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4ExtMulLowI16x8U, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4ExtMulHighI16x8U, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4SignSelect, Operator::kNoProperties, 3, 0, 1) \
+ V(I32x4ExtAddPairwiseI16x8S, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4ExtAddPairwiseI16x8U, Operator::kNoProperties, 1, 0, 1) \
V(I16x8Splat, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
@@ -427,10 +473,10 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I16x8ShrS, Operator::kNoProperties, 2, 0, 1) \
V(I16x8SConvertI32x4, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Add, Operator::kCommutative, 2, 0, 1) \
- V(I16x8AddSaturateS, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8AddSatS, Operator::kCommutative, 2, 0, 1) \
V(I16x8AddHoriz, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Sub, Operator::kNoProperties, 2, 0, 1) \
- V(I16x8SubSaturateS, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8SubSatS, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Mul, Operator::kCommutative, 2, 0, 1) \
V(I16x8MinS, Operator::kCommutative, 2, 0, 1) \
V(I16x8MaxS, Operator::kCommutative, 2, 0, 1) \
@@ -442,24 +488,32 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I16x8UConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
V(I16x8ShrU, Operator::kNoProperties, 2, 0, 1) \
V(I16x8UConvertI32x4, Operator::kNoProperties, 2, 0, 1) \
- V(I16x8AddSaturateU, Operator::kCommutative, 2, 0, 1) \
- V(I16x8SubSaturateU, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8AddSatU, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8SubSatU, Operator::kNoProperties, 2, 0, 1) \
V(I16x8MinU, Operator::kCommutative, 2, 0, 1) \
V(I16x8MaxU, Operator::kCommutative, 2, 0, 1) \
V(I16x8GtU, Operator::kNoProperties, 2, 0, 1) \
V(I16x8GeU, Operator::kNoProperties, 2, 0, 1) \
V(I16x8RoundingAverageU, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8Q15MulRSatS, Operator::kCommutative, 2, 0, 1) \
V(I16x8Abs, Operator::kNoProperties, 1, 0, 1) \
V(I16x8BitMask, Operator::kNoProperties, 1, 0, 1) \
+ V(I16x8ExtMulLowI8x16S, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8ExtMulHighI8x16S, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8ExtMulLowI8x16U, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8ExtMulHighI8x16U, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8SignSelect, Operator::kNoProperties, 3, 0, 1) \
+ V(I16x8ExtAddPairwiseI8x16S, Operator::kNoProperties, 1, 0, 1) \
+ V(I16x8ExtAddPairwiseI8x16U, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Splat, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Neg, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Shl, Operator::kNoProperties, 2, 0, 1) \
V(I8x16ShrS, Operator::kNoProperties, 2, 0, 1) \
V(I8x16SConvertI16x8, Operator::kNoProperties, 2, 0, 1) \
V(I8x16Add, Operator::kCommutative, 2, 0, 1) \
- V(I8x16AddSaturateS, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16AddSatS, Operator::kCommutative, 2, 0, 1) \
V(I8x16Sub, Operator::kNoProperties, 2, 0, 1) \
- V(I8x16SubSaturateS, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16SubSatS, Operator::kNoProperties, 2, 0, 1) \
V(I8x16Mul, Operator::kCommutative, 2, 0, 1) \
V(I8x16MinS, Operator::kCommutative, 2, 0, 1) \
V(I8x16MaxS, Operator::kCommutative, 2, 0, 1) \
@@ -469,15 +523,17 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I8x16GeS, Operator::kNoProperties, 2, 0, 1) \
V(I8x16ShrU, Operator::kNoProperties, 2, 0, 1) \
V(I8x16UConvertI16x8, Operator::kNoProperties, 2, 0, 1) \
- V(I8x16AddSaturateU, Operator::kCommutative, 2, 0, 1) \
- V(I8x16SubSaturateU, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16AddSatU, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16SubSatU, Operator::kNoProperties, 2, 0, 1) \
V(I8x16MinU, Operator::kCommutative, 2, 0, 1) \
V(I8x16MaxU, Operator::kCommutative, 2, 0, 1) \
V(I8x16GtU, Operator::kNoProperties, 2, 0, 1) \
V(I8x16GeU, Operator::kNoProperties, 2, 0, 1) \
V(I8x16RoundingAverageU, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16Popcnt, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Abs, Operator::kNoProperties, 1, 0, 1) \
V(I8x16BitMask, Operator::kNoProperties, 1, 0, 1) \
+ V(I8x16SignSelect, Operator::kNoProperties, 3, 0, 1) \
V(S128Load, Operator::kNoProperties, 2, 0, 1) \
V(S128Store, Operator::kNoProperties, 3, 0, 1) \
V(S128Zero, Operator::kNoProperties, 0, 0, 1) \
@@ -487,8 +543,6 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(S128Not, Operator::kNoProperties, 1, 0, 1) \
V(S128Select, Operator::kNoProperties, 3, 0, 1) \
V(S128AndNot, Operator::kNoProperties, 2, 0, 1) \
- V(V64x2AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(V64x2AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(V32x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(V32x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(V16x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
@@ -563,18 +617,18 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(kCompressed)
#define LOAD_TRANSFORM_LIST(V) \
- V(S8x16LoadSplat) \
- V(S16x8LoadSplat) \
- V(S32x4LoadSplat) \
- V(S64x2LoadSplat) \
- V(I16x8Load8x8S) \
- V(I16x8Load8x8U) \
- V(I32x4Load16x4S) \
- V(I32x4Load16x4U) \
- V(I64x2Load32x2S) \
- V(I64x2Load32x2U) \
- V(S128LoadMem32Zero) \
- V(S128LoadMem64Zero)
+ V(S128Load8Splat) \
+ V(S128Load16Splat) \
+ V(S128Load32Splat) \
+ V(S128Load64Splat) \
+ V(S128Load8x8S) \
+ V(S128Load8x8U) \
+ V(S128Load16x4S) \
+ V(S128Load16x4U) \
+ V(S128Load32x2S) \
+ V(S128Load32x2U) \
+ V(S128Load32Zero) \
+ V(S128Load64Zero)
#define ATOMIC_U32_TYPE_LIST(V) \
V(Uint8) \
@@ -608,6 +662,15 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I16x8, 8) \
V(I8x16, 16)
+#define SIMD_I64x2_LANES(V) V(0) V(1)
+
+#define SIMD_I32x4_LANES(V) SIMD_I64x2_LANES(V) V(2) V(3)
+
+#define SIMD_I16x8_LANES(V) SIMD_I32x4_LANES(V) V(4) V(5) V(6) V(7)
+
+#define SIMD_I8x16_LANES(V) \
+ SIMD_I16x8_LANES(V) V(8) V(9) V(10) V(11) V(12) V(13) V(14) V(15)
+
#define STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(V) \
V(4, 0) V(8, 0) V(16, 0) V(4, 4) V(8, 8) V(16, 16)
@@ -747,14 +810,30 @@ struct ProtectedLoadOperator : public Operator1<LoadRepresentation> {
1, 1, 1, 1, 0, LoadRepresentation(rep, sem)) {}
};
-template <LoadKind kind, LoadTransformation type>
+template <MemoryAccessKind kind, LoadTransformation type>
struct LoadTransformOperator : public Operator1<LoadTransformParameters> {
LoadTransformOperator()
- : Operator1(IrOpcode::kLoadTransform, Operator::kEliminatable,
+ : Operator1(IrOpcode::kLoadTransform,
+ kind == MemoryAccessKind::kProtected
+ ? Operator::kNoDeopt | Operator::kNoThrow
+ : Operator::kEliminatable,
"LoadTransform", 2, 1, 1, 1, 1, 0,
LoadTransformParameters{kind, type}) {}
};
+template <MemoryAccessKind kind, MachineRepresentation rep, MachineSemantic sem,
+ uint8_t laneidx>
+struct LoadLaneOperator : public Operator1<LoadLaneParameters> {
+ LoadLaneOperator()
+ : Operator1(
+ IrOpcode::kLoadLane,
+ kind == MemoryAccessKind::kProtected
+ ? Operator::kNoDeopt | Operator::kNoThrow
+ : Operator::kEliminatable,
+ "LoadLane", 3, 1, 1, 1, 1, 0,
+ LoadLaneParameters{kind, LoadRepresentation(rep, sem), laneidx}) {}
+};
+
template <MachineRepresentation rep, WriteBarrierKind write_barrier_kind>
struct StoreOperator : public Operator1<StoreRepresentation> {
StoreOperator()
@@ -781,6 +860,15 @@ struct ProtectedStoreOperator : public Operator1<StoreRepresentation> {
StoreRepresentation(rep, kNoWriteBarrier)) {}
};
+template <MemoryAccessKind kind, MachineRepresentation rep, uint8_t laneidx>
+struct StoreLaneOperator : public Operator1<StoreLaneParameters> {
+ StoreLaneOperator()
+ : Operator1(IrOpcode::kStoreLane,
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
+ "StoreLane", 3, 1, 1, 0, 1, 0,
+ StoreLaneParameters{kind, rep, laneidx}) {}
+};
+
template <MachineRepresentation rep, MachineSemantic sem>
struct Word32AtomicLoadOperator : public Operator1<LoadRepresentation> {
Word32AtomicLoadOperator()
@@ -1109,11 +1197,12 @@ const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
}
const Operator* MachineOperatorBuilder::LoadTransform(
- LoadKind kind, LoadTransformation transform) {
-#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
- if (kind == LoadKind::k##KIND && transform == LoadTransformation::k##TYPE) { \
- return GetCachedOperator<LoadTransformOperator< \
- LoadKind::k##KIND, LoadTransformation::k##TYPE>>(); \
+ MemoryAccessKind kind, LoadTransformation transform) {
+#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
+ if (kind == MemoryAccessKind::k##KIND && \
+ transform == LoadTransformation::k##TYPE) { \
+ return GetCachedOperator<LoadTransformOperator< \
+ MemoryAccessKind::k##KIND, LoadTransformation::k##TYPE>>(); \
}
#define LOAD_TRANSFORM(TYPE) \
LOAD_TRANSFORM_KIND(TYPE, Normal) \
@@ -1126,6 +1215,73 @@ const Operator* MachineOperatorBuilder::LoadTransform(
UNREACHABLE();
}
+const Operator* MachineOperatorBuilder::LoadLane(MemoryAccessKind kind,
+ LoadRepresentation rep,
+ uint8_t laneidx) {
+#define LOAD_LANE_KIND(TYPE, KIND, LANEIDX) \
+ if (kind == MemoryAccessKind::k##KIND && rep == MachineType::TYPE() && \
+ laneidx == LANEIDX) { \
+ return GetCachedOperator<LoadLaneOperator< \
+ MemoryAccessKind::k##KIND, MachineType::TYPE().representation(), \
+ MachineType::TYPE().semantic(), LANEIDX>>(); \
+ }
+
+#define LOAD_LANE_T(T, LANE) \
+ LOAD_LANE_KIND(T, Normal, LANE) \
+ LOAD_LANE_KIND(T, Unaligned, LANE) \
+ LOAD_LANE_KIND(T, Protected, LANE)
+
+#define LOAD_LANE_INT8(LANE) LOAD_LANE_T(Int8, LANE)
+#define LOAD_LANE_INT16(LANE) LOAD_LANE_T(Int16, LANE)
+#define LOAD_LANE_INT32(LANE) LOAD_LANE_T(Int32, LANE)
+#define LOAD_LANE_INT64(LANE) LOAD_LANE_T(Int64, LANE)
+
+  // Semicolons are unnecessary, but they help formatting.
+ SIMD_I8x16_LANES(LOAD_LANE_INT8);
+ SIMD_I16x8_LANES(LOAD_LANE_INT16);
+ SIMD_I32x4_LANES(LOAD_LANE_INT32);
+ SIMD_I64x2_LANES(LOAD_LANE_INT64);
+#undef LOAD_LANE_INT8
+#undef LOAD_LANE_INT16
+#undef LOAD_LANE_INT32
+#undef LOAD_LANE_INT64
+#undef LOAD_LANE_KIND
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::StoreLane(MemoryAccessKind kind,
+ MachineRepresentation rep,
+ uint8_t laneidx) {
+#define STORE_LANE_KIND(REP, KIND, LANEIDX) \
+ if (kind == MemoryAccessKind::k##KIND && \
+ rep == MachineRepresentation::REP && laneidx == LANEIDX) { \
+ return GetCachedOperator<StoreLaneOperator< \
+ MemoryAccessKind::k##KIND, MachineRepresentation::REP, LANEIDX>>(); \
+ }
+
+#define STORE_LANE_T(T, LANE) \
+ STORE_LANE_KIND(T, Normal, LANE) \
+ STORE_LANE_KIND(T, Unaligned, LANE) \
+ STORE_LANE_KIND(T, Protected, LANE)
+
+#define STORE_LANE_WORD8(LANE) STORE_LANE_T(kWord8, LANE)
+#define STORE_LANE_WORD16(LANE) STORE_LANE_T(kWord16, LANE)
+#define STORE_LANE_WORD32(LANE) STORE_LANE_T(kWord32, LANE)
+#define STORE_LANE_WORD64(LANE) STORE_LANE_T(kWord64, LANE)
+
+  // Semicolons are unnecessary, but they help formatting.
+ SIMD_I8x16_LANES(STORE_LANE_WORD8);
+ SIMD_I16x8_LANES(STORE_LANE_WORD16);
+ SIMD_I32x4_LANES(STORE_LANE_WORD32);
+ SIMD_I64x2_LANES(STORE_LANE_WORD64);
+#undef STORE_LANE_WORD8
+#undef STORE_LANE_WORD16
+#undef STORE_LANE_WORD32
+#undef STORE_LANE_WORD64
+#undef STORE_LANE_KIND
+ UNREACHABLE();
+}
+
const Operator* MachineOperatorBuilder::StackSlot(int size, int alignment) {
DCHECK_LE(0, size);
DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || alignment == 16);
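
// Illustrative sketch only, not V8 code: the new LoadLane/StoreLane operators
// added above model the Wasm SIMD load/store-lane instructions -- load a
// scalar from [base + index] into one lane of a 128-bit value, or store one
// lane back to memory -- shown here on a plain 4 x 32-bit array standing in
// for an S128 value:
#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>

using S128 = std::array<uint32_t, 4>;

S128 LoadLane32(S128 v, const void* base, size_t index, unsigned laneidx) {
  uint32_t scalar;
  std::memcpy(&scalar, static_cast<const uint8_t*>(base) + index,
              sizeof scalar);
  v[laneidx] = scalar;  // replace the selected lane, keep the others
  return v;
}

void StoreLane32(void* base, size_t index, S128 v, unsigned laneidx) {
  std::memcpy(static_cast<uint8_t*>(base) + index, &v[laneidx],
              sizeof v[laneidx]);
}

int main() {
  uint32_t mem[2] = {0x11111111u, 0x22222222u};
  S128 v = {1, 2, 3, 4};
  v = LoadLane32(v, mem, 4, 2);  // load mem[1] into lane 2
  assert((v == S128{1, 2, 0x22222222u, 4}));
  StoreLane32(mem, 0, v, 3);     // store lane 3 to mem[0]
  assert(mem[0] == 4u);
}
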
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 8c373fd6ca..7912c55de5 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -49,29 +49,29 @@ using LoadRepresentation = MachineType;
V8_EXPORT_PRIVATE LoadRepresentation LoadRepresentationOf(Operator const*)
V8_WARN_UNUSED_RESULT;
-enum class LoadKind {
+enum class MemoryAccessKind {
kNormal,
kUnaligned,
kProtected,
};
-size_t hash_value(LoadKind);
+size_t hash_value(MemoryAccessKind);
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, LoadKind);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, MemoryAccessKind);
enum class LoadTransformation {
- kS8x16LoadSplat,
- kS16x8LoadSplat,
- kS32x4LoadSplat,
- kS64x2LoadSplat,
- kI16x8Load8x8S,
- kI16x8Load8x8U,
- kI32x4Load16x4S,
- kI32x4Load16x4U,
- kI64x2Load32x2S,
- kI64x2Load32x2U,
- kS128LoadMem32Zero,
- kS128LoadMem64Zero,
+ kS128Load8Splat,
+ kS128Load16Splat,
+ kS128Load32Splat,
+ kS128Load64Splat,
+ kS128Load8x8S,
+ kS128Load8x8U,
+ kS128Load16x4S,
+ kS128Load16x4U,
+ kS128Load32x2S,
+ kS128Load32x2U,
+ kS128Load32Zero,
+ kS128Load64Zero,
};
size_t hash_value(LoadTransformation);
@@ -79,7 +79,7 @@ size_t hash_value(LoadTransformation);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, LoadTransformation);
struct LoadTransformParameters {
- LoadKind kind;
+ MemoryAccessKind kind;
LoadTransformation transformation;
};
@@ -91,6 +91,17 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
V8_EXPORT_PRIVATE LoadTransformParameters const& LoadTransformParametersOf(
Operator const*) V8_WARN_UNUSED_RESULT;
+struct LoadLaneParameters {
+ MemoryAccessKind kind;
+ LoadRepresentation rep;
+ uint8_t laneidx;
+};
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, LoadLaneParameters);
+
+V8_EXPORT_PRIVATE LoadLaneParameters const& LoadLaneParametersOf(
+ Operator const*) V8_WARN_UNUSED_RESULT;
+
// A Store needs a MachineType and a WriteBarrierKind in order to emit the
// correct write barrier.
class StoreRepresentation final {
@@ -124,6 +135,17 @@ using UnalignedStoreRepresentation = MachineRepresentation;
UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
Operator const*) V8_WARN_UNUSED_RESULT;
+struct StoreLaneParameters {
+ MemoryAccessKind kind;
+ MachineRepresentation rep;
+ uint8_t laneidx;
+};
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, StoreLaneParameters);
+
+V8_EXPORT_PRIVATE StoreLaneParameters const& StoreLaneParametersOf(
+ Operator const*) V8_WARN_UNUSED_RESULT;
+
class StackSlotRepresentation final {
public:
StackSlotRepresentation(int size, int alignment)
@@ -306,6 +328,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
AlignmentRequirements alignmentRequirements =
AlignmentRequirements::FullUnalignedAccessSupport());
+ MachineOperatorBuilder(const MachineOperatorBuilder&) = delete;
+ MachineOperatorBuilder& operator=(const MachineOperatorBuilder&) = delete;
+
const Operator* Comment(const char* msg);
const Operator* AbortCSAAssert();
const Operator* DebugBreak();
@@ -637,22 +662,23 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I64x2ReplaceLane(int32_t);
const Operator* I64x2ReplaceLaneI32Pair(int32_t);
const Operator* I64x2Neg();
+ const Operator* I64x2SConvertI32x4Low();
+ const Operator* I64x2SConvertI32x4High();
+ const Operator* I64x2UConvertI32x4Low();
+ const Operator* I64x2UConvertI32x4High();
+ const Operator* I64x2BitMask();
const Operator* I64x2Shl();
const Operator* I64x2ShrS();
const Operator* I64x2Add();
const Operator* I64x2Sub();
const Operator* I64x2Mul();
- const Operator* I64x2MinS();
- const Operator* I64x2MaxS();
const Operator* I64x2Eq();
- const Operator* I64x2Ne();
- const Operator* I64x2GtS();
- const Operator* I64x2GeS();
const Operator* I64x2ShrU();
- const Operator* I64x2MinU();
- const Operator* I64x2MaxU();
- const Operator* I64x2GtU();
- const Operator* I64x2GeU();
+ const Operator* I64x2ExtMulLowI32x4S();
+ const Operator* I64x2ExtMulHighI32x4S();
+ const Operator* I64x2ExtMulLowI32x4U();
+ const Operator* I64x2ExtMulHighI32x4U();
+ const Operator* I64x2SignSelect();
const Operator* I32x4Splat();
const Operator* I32x4ExtractLane(int32_t);
@@ -685,6 +711,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4Abs();
const Operator* I32x4BitMask();
const Operator* I32x4DotI16x8S();
+ const Operator* I32x4ExtMulLowI16x8S();
+ const Operator* I32x4ExtMulHighI16x8S();
+ const Operator* I32x4ExtMulLowI16x8U();
+ const Operator* I32x4ExtMulHighI16x8U();
+ const Operator* I32x4SignSelect();
+ const Operator* I32x4ExtAddPairwiseI16x8S();
+ const Operator* I32x4ExtAddPairwiseI16x8U();
const Operator* I16x8Splat();
const Operator* I16x8ExtractLaneU(int32_t);
@@ -697,10 +730,10 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I16x8ShrS();
const Operator* I16x8SConvertI32x4();
const Operator* I16x8Add();
- const Operator* I16x8AddSaturateS();
+ const Operator* I16x8AddSatS();
const Operator* I16x8AddHoriz();
const Operator* I16x8Sub();
- const Operator* I16x8SubSaturateS();
+ const Operator* I16x8SubSatS();
const Operator* I16x8Mul();
const Operator* I16x8MinS();
const Operator* I16x8MaxS();
@@ -713,15 +746,23 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I16x8UConvertI8x16High();
const Operator* I16x8ShrU();
const Operator* I16x8UConvertI32x4();
- const Operator* I16x8AddSaturateU();
- const Operator* I16x8SubSaturateU();
+ const Operator* I16x8AddSatU();
+ const Operator* I16x8SubSatU();
const Operator* I16x8MinU();
const Operator* I16x8MaxU();
const Operator* I16x8GtU();
const Operator* I16x8GeU();
const Operator* I16x8RoundingAverageU();
+ const Operator* I16x8Q15MulRSatS();
const Operator* I16x8Abs();
const Operator* I16x8BitMask();
+ const Operator* I16x8ExtMulLowI8x16S();
+ const Operator* I16x8ExtMulHighI8x16S();
+ const Operator* I16x8ExtMulLowI8x16U();
+ const Operator* I16x8ExtMulHighI8x16U();
+ const Operator* I16x8SignSelect();
+ const Operator* I16x8ExtAddPairwiseI8x16S();
+ const Operator* I16x8ExtAddPairwiseI8x16U();
const Operator* I8x16Splat();
const Operator* I8x16ExtractLaneU(int32_t);
@@ -732,9 +773,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16ShrS();
const Operator* I8x16SConvertI16x8();
const Operator* I8x16Add();
- const Operator* I8x16AddSaturateS();
+ const Operator* I8x16AddSatS();
const Operator* I8x16Sub();
- const Operator* I8x16SubSaturateS();
+ const Operator* I8x16SubSatS();
const Operator* I8x16Mul();
const Operator* I8x16MinS();
const Operator* I8x16MaxS();
@@ -745,15 +786,17 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16ShrU();
const Operator* I8x16UConvertI16x8();
- const Operator* I8x16AddSaturateU();
- const Operator* I8x16SubSaturateU();
+ const Operator* I8x16AddSatU();
+ const Operator* I8x16SubSatU();
const Operator* I8x16MinU();
const Operator* I8x16MaxU();
const Operator* I8x16GtU();
const Operator* I8x16GeU();
const Operator* I8x16RoundingAverageU();
+ const Operator* I8x16Popcnt();
const Operator* I8x16Abs();
const Operator* I8x16BitMask();
+ const Operator* I8x16SignSelect();
const Operator* S128Load();
const Operator* S128Store();
@@ -770,8 +813,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16Swizzle();
const Operator* I8x16Shuffle(const uint8_t shuffle[16]);
- const Operator* V64x2AnyTrue();
- const Operator* V64x2AllTrue();
const Operator* V32x4AnyTrue();
const Operator* V32x4AllTrue();
const Operator* V16x8AnyTrue();
@@ -784,12 +825,21 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* PoisonedLoad(LoadRepresentation rep);
const Operator* ProtectedLoad(LoadRepresentation rep);
- const Operator* LoadTransform(LoadKind kind, LoadTransformation transform);
+ const Operator* LoadTransform(MemoryAccessKind kind,
+ LoadTransformation transform);
+
+ // SIMD load: replace a specified lane with [base + index].
+ const Operator* LoadLane(MemoryAccessKind kind, LoadRepresentation rep,
+ uint8_t laneidx);
// store [base + index], value
const Operator* Store(StoreRepresentation rep);
const Operator* ProtectedStore(MachineRepresentation rep);
+ // SIMD store: store a specified lane of value into [base + index].
+ const Operator* StoreLane(MemoryAccessKind kind, MachineRepresentation rep,
+ uint8_t laneidx);
+
// unaligned load [base + index]
const Operator* UnalignedLoad(LoadRepresentation rep);
@@ -931,8 +981,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
MachineRepresentation const word_;
Flags const flags_;
AlignmentRequirements const alignment_requirements_;
-
- DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
};
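
// Illustrative sketch only, not V8 code: the change above drops the
// DISALLOW_COPY_AND_ASSIGN macro in favour of explicitly deleted members,
// the plain C++11 way to make a class non-copyable:
class NonCopyable {
 public:
  NonCopyable() = default;
  NonCopyable(const NonCopyable&) = delete;
  NonCopyable& operator=(const NonCopyable&) = delete;
};

// NonCopyable a;
// NonCopyable b = a;  // would not compile: the copy constructor is deleted
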
diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc
index 3e48a95405..1e1a59d784 100644
--- a/deps/v8/src/compiler/map-inference.cc
+++ b/deps/v8/src/compiler/map-inference.cc
@@ -19,12 +19,12 @@ MapInference::MapInference(JSHeapBroker* broker, Node* object, Node* effect)
: broker_(broker), object_(object) {
ZoneHandleSet<Map> maps;
auto result =
- NodeProperties::InferReceiverMapsUnsafe(broker_, object_, effect, &maps);
+ NodeProperties::InferMapsUnsafe(broker_, object_, effect, &maps);
maps_.insert(maps_.end(), maps.begin(), maps.end());
- maps_state_ = (result == NodeProperties::kUnreliableReceiverMaps)
+ maps_state_ = (result == NodeProperties::kUnreliableMaps)
? kUnreliableDontNeedGuard
: kReliableOrGuarded;
- DCHECK_EQ(maps_.empty(), result == NodeProperties::kNoReceiverMaps);
+ DCHECK_EQ(maps_.empty(), result == NodeProperties::kNoMaps);
}
MapInference::~MapInference() { CHECK(Safe()); }
diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc
index a1b68d48e3..21a0169f2e 100644
--- a/deps/v8/src/compiler/memory-lowering.cc
+++ b/deps/v8/src/compiler/memory-lowering.cc
@@ -98,6 +98,10 @@ Reduction MemoryLowering::ReduceAllocateRaw(
DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
DCHECK_IMPLIES(allocation_folding_ == AllocationFolding::kDoAllocationFolding,
state_ptr != nullptr);
+ // Code objects may have a maximum size smaller than kMaxHeapObjectSize due to
+ // guard pages. If we need to support allocating code here we would need to
+ // call MemoryChunkLayout::MaxRegularCodeObjectSize() at runtime.
+ DCHECK_NE(allocation_type, AllocationType::kCode);
Node* value;
Node* size = node->InputAt(0);
Node* effect = node->InputAt(1);
@@ -135,7 +139,7 @@ Reduction MemoryLowering::ReduceAllocateRaw(
IntPtrMatcher m(size);
if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new &&
allocation_folding_ == AllocationFolding::kDoAllocationFolding) {
- intptr_t const object_size = m.Value();
+ intptr_t const object_size = m.ResolvedValue();
AllocationState const* state = *state_ptr;
if (state->size() <= kMaxRegularHeapObjectSize - object_size &&
state->group()->allocation() == allocation_type) {
@@ -306,7 +310,9 @@ Reduction MemoryLowering::ReduceLoadElement(Node* node) {
return Changed(node);
}
-Node* MemoryLowering::DecodeExternalPointer(Node* node) {
+Node* MemoryLowering::DecodeExternalPointer(
+ Node* node, ExternalPointerTag external_pointer_tag) {
+#ifdef V8_HEAP_SANDBOX
DCHECK(V8_HEAP_SANDBOX_BOOL);
DCHECK(node->opcode() == IrOpcode::kLoad ||
node->opcode() == IrOpcode::kPoisonedLoad);
@@ -317,16 +323,29 @@ Node* MemoryLowering::DecodeExternalPointer(Node* node) {
// Clone the load node and put it here.
// TODO(turbofan): consider adding GraphAssembler::Clone() suitable for
// cloning nodes from arbitrary locations in effect/control chains.
- Node* node_copy = __ AddNode(graph()->CloneNode(node));
+ Node* index = __ AddNode(graph()->CloneNode(node));
// Uncomment this to generate a breakpoint for debugging purposes.
// __ DebugBreak();
- // Decode loaded enternal pointer.
+ // Decode loaded external pointer.
STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
- Node* salt = __ IntPtrConstant(kExternalPointerSalt);
- Node* decoded_ptr = __ WordXor(node_copy, salt);
+ Node* external_pointer_table_address = __ ExternalConstant(
+ ExternalReference::external_pointer_table_address(isolate()));
+ Node* table = __ Load(MachineType::Pointer(), external_pointer_table_address,
+ Internals::kExternalPointerTableBufferOffset);
+ // TODO(v8:10391, saelo): bounds check if table is not caged
+ Node* offset = __ Int32Mul(index, __ Int32Constant(8));
+ Node* decoded_ptr =
+ __ Load(MachineType::Pointer(), table, __ ChangeUint32ToUint64(offset));
+ if (external_pointer_tag != 0) {
+ Node* tag = __ IntPtrConstant(external_pointer_tag);
+ decoded_ptr = __ WordXor(decoded_ptr, tag);
+ }
return decoded_ptr;
+#else
+ return node;
+#endif // V8_HEAP_SANDBOX
}
Reduction MemoryLowering::ReduceLoadField(Node* node) {
@@ -335,6 +354,11 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) {
Node* offset = __ IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph_zone(), 1, offset);
MachineType type = access.machine_type;
+ if (V8_HEAP_SANDBOX_BOOL &&
+ access.type.Is(Type::SandboxedExternalPointer())) {
+ // External pointer table indices are 32-bit numbers.
+ type = MachineType::Uint32();
+ }
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
@@ -342,7 +366,12 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) {
}
if (V8_HEAP_SANDBOX_BOOL &&
access.type.Is(Type::SandboxedExternalPointer())) {
- node = DecodeExternalPointer(node);
+#ifdef V8_HEAP_SANDBOX
+ ExternalPointerTag tag = access.external_pointer_tag;
+#else
+ ExternalPointerTag tag = kExternalPointerNullTag;
+#endif
+ node = DecodeExternalPointer(node, tag);
return Replace(node);
} else {
DCHECK(!access.type.Is(Type::SandboxedExternalPointer()));
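
A minimal sketch of the decode that the reworked DecodeExternalPointer emits when V8_HEAP_SANDBOX is enabled: the field now holds a 32-bit index into the external pointer table, the 8-byte table entry is loaded, and the type tag is XOR-ed back out. The types and helper below are illustrative stand-ins, not the actual V8 declarations.

#include <cstdint>

using TagBits = uint64_t;  // stands in for ExternalPointerTag

inline uintptr_t DecodeExternalPointerSketch(const uint64_t* table,
                                             uint32_t index, TagBits tag) {
  uint64_t entry = table[index];  // table entries are 8 bytes wide
  if (tag != 0) entry ^= tag;     // untag, mirroring the WordXor in the lowering
  return static_cast<uintptr_t>(entry);
}
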
diff --git a/deps/v8/src/compiler/memory-lowering.h b/deps/v8/src/compiler/memory-lowering.h
index 7990b1715c..7ad02b95af 100644
--- a/deps/v8/src/compiler/memory-lowering.h
+++ b/deps/v8/src/compiler/memory-lowering.h
@@ -32,6 +32,9 @@ class MemoryLowering final : public Reducer {
// An allocation state is propagated on the effect paths through the graph.
class AllocationState final : public ZoneObject {
public:
+ AllocationState(const AllocationState&) = delete;
+ AllocationState& operator=(const AllocationState&) = delete;
+
static AllocationState const* Empty(Zone* zone) {
return zone->New<AllocationState>();
}
@@ -65,8 +68,6 @@ class MemoryLowering final : public Reducer {
intptr_t const size_;
Node* const top_;
Node* const effect_;
-
- DISALLOW_COPY_AND_ASSIGN(AllocationState);
};
using WriteBarrierAssertFailedCallback = std::function<void(
@@ -108,7 +109,7 @@ class MemoryLowering final : public Reducer {
Node* value,
AllocationState const* state,
WriteBarrierKind);
- Node* DecodeExternalPointer(Node* encoded_pointer);
+ Node* DecodeExternalPointer(Node* encoded_pointer, ExternalPointerTag tag);
Node* ComputeIndex(ElementAccess const& access, Node* node);
bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
diff --git a/deps/v8/src/compiler/node-cache.h b/deps/v8/src/compiler/node-cache.h
index 935e5778e3..8e1d3d4eae 100644
--- a/deps/v8/src/compiler/node-cache.h
+++ b/deps/v8/src/compiler/node-cache.h
@@ -33,6 +33,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) NodeCache final {
public:
explicit NodeCache(Zone* zone) : map_(zone) {}
~NodeCache() = default;
+ NodeCache(const NodeCache&) = delete;
+ NodeCache& operator=(const NodeCache&) = delete;
// Search for node associated with {key} and return a pointer to a memory
// location in this cache that stores an entry for the key. If the location
@@ -50,8 +52,6 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) NodeCache final {
private:
ZoneUnorderedMap<Key, Node*, Hash, Pred> map_;
-
- DISALLOW_COPY_AND_ASSIGN(NodeCache);
};
// Various default cache types.
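
The DISALLOW_COPY_AND_ASSIGN removals in this and the surrounding files all follow the pattern sketched below: the macro in the private section is replaced with explicitly deleted copy operations in the public section, keeping the class non-copyable while producing clearer compiler diagnostics. The example class is illustrative only.

class NonCopyable {
 public:
  NonCopyable() = default;
  // These two deletions replace the old DISALLOW_COPY_AND_ASSIGN(NonCopyable).
  NonCopyable(const NonCopyable&) = delete;
  NonCopyable& operator=(const NonCopyable&) = delete;
};
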
diff --git a/deps/v8/src/compiler/node-marker.h b/deps/v8/src/compiler/node-marker.h
index e38105dd8b..df4eac97a9 100644
--- a/deps/v8/src/compiler/node-marker.h
+++ b/deps/v8/src/compiler/node-marker.h
@@ -19,6 +19,8 @@ class Graph;
class NodeMarkerBase {
public:
NodeMarkerBase(Graph* graph, uint32_t num_states);
+ NodeMarkerBase(const NodeMarkerBase&) = delete;
+ NodeMarkerBase& operator=(const NodeMarkerBase&) = delete;
V8_INLINE Mark Get(const Node* node) {
Mark mark = node->mark();
@@ -37,8 +39,6 @@ class NodeMarkerBase {
private:
Mark const mark_min_;
Mark const mark_max_;
-
- DISALLOW_COPY_AND_ASSIGN(NodeMarkerBase);
};
// A NodeMarker assigns a local "state" to every node of a graph in constant
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index bd93b545e1..c9736c1d48 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -11,6 +11,7 @@
#include "src/base/compiler-specific.h"
#include "src/codegen/external-reference.h"
#include "src/common/globals.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/numbers/double.h"
@@ -48,96 +49,89 @@ struct NodeMatcher {
Node* node_;
};
+inline Node* SkipValueIdentities(Node* node) {
+#ifdef DEBUG
+ bool seen_fold_constant = false;
+#endif
+ do {
+#ifdef DEBUG
+ if (node->opcode() == IrOpcode::kFoldConstant) {
+ DCHECK(!seen_fold_constant);
+ seen_fold_constant = true;
+ }
+#endif
+ } while (NodeProperties::IsValueIdentity(node, &node));
+ DCHECK_NOT_NULL(node);
+ return node;
+}
// A pattern matcher for arbitrary value constants.
+//
+// Note that value identities on the input node are skipped when matching. The
+// resolved value may not be a parameter of the input node. The node() method
+// returns the unmodified input node. This is by design, as reducers may wish to
+// match value constants but delay reducing the node until a later phase. For
+// example, binary operator reducers may opt to keep FoldConstant operands while
+// applying a reduction that matches on the constant value of the FoldConstant.
template <typename T, IrOpcode::Value kOpcode>
struct ValueMatcher : public NodeMatcher {
using ValueType = T;
- explicit ValueMatcher(Node* node) : NodeMatcher(node) {
- static_assert(kOpcode != IrOpcode::kFoldConstant, "unsupported opcode");
- if (node->opcode() == IrOpcode::kFoldConstant) {
- node = node->InputAt(1);
- }
- DCHECK_NE(node->opcode(), IrOpcode::kFoldConstant);
- has_value_ = opcode() == kOpcode;
- if (has_value_) {
- value_ = OpParameter<T>(node->op());
+ explicit ValueMatcher(Node* node)
+ : NodeMatcher(node), resolved_value_(), has_resolved_value_(false) {
+ node = SkipValueIdentities(node);
+ has_resolved_value_ = node->opcode() == kOpcode;
+ if (has_resolved_value_) {
+ resolved_value_ = OpParameter<T>(node->op());
}
}
- bool HasValue() const { return has_value_; }
- const T& Value() const {
- DCHECK(HasValue());
- return value_;
+ bool HasResolvedValue() const { return has_resolved_value_; }
+ const T& ResolvedValue() const {
+ CHECK(HasResolvedValue());
+ return resolved_value_;
}
private:
- T value_;
- bool has_value_;
+ T resolved_value_;
+ bool has_resolved_value_;
};
-
template <>
inline ValueMatcher<uint32_t, IrOpcode::kInt32Constant>::ValueMatcher(
Node* node)
- : NodeMatcher(node),
- value_(),
- has_value_(opcode() == IrOpcode::kInt32Constant) {
- if (has_value_) {
- value_ = static_cast<uint32_t>(OpParameter<int32_t>(node->op()));
+ : NodeMatcher(node), resolved_value_(), has_resolved_value_(false) {
+ node = SkipValueIdentities(node);
+ has_resolved_value_ = node->opcode() == IrOpcode::kInt32Constant;
+ if (has_resolved_value_) {
+ resolved_value_ = static_cast<uint32_t>(OpParameter<int32_t>(node->op()));
}
}
-
template <>
inline ValueMatcher<int64_t, IrOpcode::kInt64Constant>::ValueMatcher(Node* node)
- : NodeMatcher(node), value_(), has_value_(false) {
- if (opcode() == IrOpcode::kInt32Constant) {
- value_ = OpParameter<int32_t>(node->op());
- has_value_ = true;
- } else if (opcode() == IrOpcode::kInt64Constant) {
- value_ = OpParameter<int64_t>(node->op());
- has_value_ = true;
+ : NodeMatcher(node), resolved_value_(), has_resolved_value_(false) {
+ node = SkipValueIdentities(node);
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ resolved_value_ = OpParameter<int32_t>(node->op());
+ has_resolved_value_ = true;
+ } else if (node->opcode() == IrOpcode::kInt64Constant) {
+ resolved_value_ = OpParameter<int64_t>(node->op());
+ has_resolved_value_ = true;
}
}
-
template <>
inline ValueMatcher<uint64_t, IrOpcode::kInt64Constant>::ValueMatcher(
Node* node)
- : NodeMatcher(node), value_(), has_value_(false) {
- if (opcode() == IrOpcode::kInt32Constant) {
- value_ = static_cast<uint32_t>(OpParameter<int32_t>(node->op()));
- has_value_ = true;
- } else if (opcode() == IrOpcode::kInt64Constant) {
- value_ = static_cast<uint64_t>(OpParameter<int64_t>(node->op()));
- has_value_ = true;
- }
-}
-
-template <>
-inline ValueMatcher<double, IrOpcode::kNumberConstant>::ValueMatcher(Node* node)
- : NodeMatcher(node), value_(), has_value_(false) {
- if (node->opcode() == IrOpcode::kNumberConstant) {
- value_ = OpParameter<double>(node->op());
- has_value_ = true;
- } else if (node->opcode() == IrOpcode::kFoldConstant) {
- node = node->InputAt(1);
- DCHECK_NE(node->opcode(), IrOpcode::kFoldConstant);
- }
-}
-
-template <>
-inline ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant>::ValueMatcher(
- Node* node)
- : NodeMatcher(node), value_(), has_value_(false) {
- if (node->opcode() == IrOpcode::kHeapConstant) {
- value_ = OpParameter<Handle<HeapObject>>(node->op());
- has_value_ = true;
- } else if (node->opcode() == IrOpcode::kFoldConstant) {
- node = node->InputAt(1);
- DCHECK_NE(node->opcode(), IrOpcode::kFoldConstant);
+ : NodeMatcher(node), resolved_value_(), has_resolved_value_(false) {
+ node = SkipValueIdentities(node);
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ resolved_value_ = static_cast<uint32_t>(OpParameter<int32_t>(node->op()));
+ has_resolved_value_ = true;
+ } else if (node->opcode() == IrOpcode::kInt64Constant) {
+ resolved_value_ = static_cast<uint64_t>(OpParameter<int64_t>(node->op()));
+ has_resolved_value_ = true;
}
}
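
A simplified model of the matcher behaviour introduced above: the constant is resolved through value identities (TypeGuard, FoldConstant) exactly as SkipValueIdentities does, while node() keeps reporting the original wrapper so a reducer can match now and rewrite later. The toy node type below is illustrative and unrelated to the real Node/Operator classes.

#include <optional>

struct ToyNode {
  enum Kind { kInt32Constant, kTypeGuard, kFoldConstant } kind;
  int value = 0;                   // meaningful only for kInt32Constant
  ToyNode* passthrough = nullptr;  // the value input forwarded by identities
};

inline std::optional<int> ResolveInt32(const ToyNode* node) {
  while (node->kind == ToyNode::kTypeGuard ||
         node->kind == ToyNode::kFoldConstant) {
    node = node->passthrough;  // same skipping as SkipValueIdentities()
  }
  if (node->kind != ToyNode::kInt32Constant) return std::nullopt;
  return node->value;
}
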
@@ -147,24 +141,27 @@ struct IntMatcher final : public ValueMatcher<T, kOpcode> {
explicit IntMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
bool Is(const T& value) const {
- return this->HasValue() && this->Value() == value;
+ return this->HasResolvedValue() && this->ResolvedValue() == value;
}
bool IsInRange(const T& low, const T& high) const {
- return this->HasValue() && low <= this->Value() && this->Value() <= high;
+ return this->HasResolvedValue() && low <= this->ResolvedValue() &&
+ this->ResolvedValue() <= high;
}
bool IsMultipleOf(T n) const {
- return this->HasValue() && (this->Value() % n) == 0;
+ return this->HasResolvedValue() && (this->ResolvedValue() % n) == 0;
}
bool IsPowerOf2() const {
- return this->HasValue() && this->Value() > 0 &&
- (this->Value() & (this->Value() - 1)) == 0;
+ return this->HasResolvedValue() && this->ResolvedValue() > 0 &&
+ (this->ResolvedValue() & (this->ResolvedValue() - 1)) == 0;
}
bool IsNegativePowerOf2() const {
- return this->HasValue() && this->Value() < 0 &&
- ((this->Value() == std::numeric_limits<T>::min()) ||
- (-this->Value() & (-this->Value() - 1)) == 0);
+ return this->HasResolvedValue() && this->ResolvedValue() < 0 &&
+ ((this->ResolvedValue() == std::numeric_limits<T>::min()) ||
+ (-this->ResolvedValue() & (-this->ResolvedValue() - 1)) == 0);
+ }
+ bool IsNegative() const {
+ return this->HasResolvedValue() && this->ResolvedValue() < 0;
}
- bool IsNegative() const { return this->HasValue() && this->Value() < 0; }
};
using Int32Matcher = IntMatcher<int32_t, IrOpcode::kInt32Constant>;
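
The IsPowerOf2/IsNegativePowerOf2 predicates above rely on the usual bit trick: a positive value v is a power of two exactly when v & (v - 1) is zero, because subtracting one flips the single set bit and everything below it. A standalone illustration:

#include <cstdint>

inline bool IsPowerOfTwoSketch(int64_t v) {
  return v > 0 && (v & (v - 1)) == 0;  // 8 -> true, 12 -> false, 0 -> false
}
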
@@ -186,28 +183,36 @@ struct FloatMatcher final : public ValueMatcher<T, kOpcode> {
explicit FloatMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
bool Is(const T& value) const {
- return this->HasValue() && this->Value() == value;
+ return this->HasResolvedValue() && this->ResolvedValue() == value;
}
bool IsInRange(const T& low, const T& high) const {
- return this->HasValue() && low <= this->Value() && this->Value() <= high;
+ return this->HasResolvedValue() && low <= this->ResolvedValue() &&
+ this->ResolvedValue() <= high;
}
bool IsMinusZero() const {
- return this->Is(0.0) && std::signbit(this->Value());
+ return this->Is(0.0) && std::signbit(this->ResolvedValue());
+ }
+ bool IsNegative() const {
+ return this->HasResolvedValue() && this->ResolvedValue() < 0.0;
+ }
+ bool IsNaN() const {
+ return this->HasResolvedValue() && std::isnan(this->ResolvedValue());
+ }
+ bool IsZero() const {
+ return this->Is(0.0) && !std::signbit(this->ResolvedValue());
}
- bool IsNegative() const { return this->HasValue() && this->Value() < 0.0; }
- bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
- bool IsZero() const { return this->Is(0.0) && !std::signbit(this->Value()); }
bool IsNormal() const {
- return this->HasValue() && std::isnormal(this->Value());
+ return this->HasResolvedValue() && std::isnormal(this->ResolvedValue());
}
bool IsInteger() const {
- return this->HasValue() && std::nearbyint(this->Value()) == this->Value();
+ return this->HasResolvedValue() &&
+ std::nearbyint(this->ResolvedValue()) == this->ResolvedValue();
}
bool IsPositiveOrNegativePowerOf2() const {
- if (!this->HasValue() || (this->Value() == 0.0)) {
+ if (!this->HasResolvedValue() || (this->ResolvedValue() == 0.0)) {
return false;
}
- Double value = Double(this->Value());
+ Double value = Double(this->ResolvedValue());
return !value.IsInfinite() && base::bits::IsPowerOfTwo(value.Significand());
}
};
@@ -224,11 +229,12 @@ struct HeapObjectMatcherImpl final
: ValueMatcher<Handle<HeapObject>, kHeapConstantOpcode>(node) {}
bool Is(Handle<HeapObject> const& value) const {
- return this->HasValue() && this->Value().address() == value.address();
+ return this->HasResolvedValue() &&
+ this->ResolvedValue().address() == value.address();
}
HeapObjectRef Ref(JSHeapBroker* broker) const {
- return HeapObjectRef(broker, this->Value());
+ return HeapObjectRef(broker, this->ResolvedValue());
}
};
@@ -242,7 +248,7 @@ struct ExternalReferenceMatcher final
explicit ExternalReferenceMatcher(Node* node)
: ValueMatcher<ExternalReference, IrOpcode::kExternalConstant>(node) {}
bool Is(const ExternalReference& value) const {
- return this->HasValue() && this->Value() == value;
+ return this->HasResolvedValue() && this->ResolvedValue() == value;
}
};
@@ -285,7 +291,9 @@ struct BinopMatcher : public NodeMatcher {
const Left& left() const { return left_; }
const Right& right() const { return right_; }
- bool IsFoldable() const { return left().HasValue() && right().HasValue(); }
+ bool IsFoldable() const {
+ return left().HasResolvedValue() && right().HasResolvedValue();
+ }
bool LeftEqualsRight() const { return left().node() == right().node(); }
bool OwnsInput(Node* input) {
@@ -309,7 +317,7 @@ struct BinopMatcher : public NodeMatcher {
private:
void PutConstantOnRight() {
- if (left().HasValue() && !right().HasValue()) {
+ if (left().HasResolvedValue() && !right().HasResolvedValue()) {
SwapInputs();
}
}
@@ -340,17 +348,17 @@ struct ScaleMatcher {
if (node->InputCount() < 2) return;
BinopMatcher m(node);
if (node->opcode() == kShiftOpcode) {
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
typename BinopMatcher::RightMatcher::ValueType value =
- m.right().Value();
+ m.right().ResolvedValue();
if (value >= 0 && value <= 3) {
scale_ = static_cast<int>(value);
}
}
} else if (node->opcode() == kMulOpcode) {
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
typename BinopMatcher::RightMatcher::ValueType value =
- m.right().Value();
+ m.right().ResolvedValue();
if (value == 1) {
scale_ = 0;
} else if (value == 2) {
@@ -550,7 +558,7 @@ struct BaseWithIndexAndDisplacementMatcher {
if (right->opcode() == AddMatcher::kSubOpcode &&
OwnedByAddressingOperand(right)) {
AddMatcher right_matcher(right);
- if (right_matcher.right().HasValue()) {
+ if (right_matcher.right().HasResolvedValue()) {
// (S + (B - D))
base = right_matcher.left().node();
displacement = right_matcher.right().node();
@@ -562,7 +570,7 @@ struct BaseWithIndexAndDisplacementMatcher {
if (right->opcode() == AddMatcher::kAddOpcode &&
OwnedByAddressingOperand(right)) {
AddMatcher right_matcher(right);
- if (right_matcher.right().HasValue()) {
+ if (right_matcher.right().HasResolvedValue()) {
// (S + (B + D))
base = right_matcher.left().node();
displacement = right_matcher.right().node();
@@ -570,7 +578,7 @@ struct BaseWithIndexAndDisplacementMatcher {
// (S + (B + B))
base = right;
}
- } else if (m.right().HasValue()) {
+ } else if (m.right().HasResolvedValue()) {
// (S + D)
displacement = right;
} else {
@@ -585,7 +593,7 @@ struct BaseWithIndexAndDisplacementMatcher {
AddMatcher left_matcher(left);
Node* left_left = left_matcher.left().node();
Node* left_right = left_matcher.right().node();
- if (left_matcher.right().HasValue()) {
+ if (left_matcher.right().HasResolvedValue()) {
if (left_matcher.HasIndexInput() && left_left->OwnedBy(left)) {
// ((S - D) + B)
index = left_matcher.IndexInput();
@@ -612,7 +620,7 @@ struct BaseWithIndexAndDisplacementMatcher {
Node* left_left = left_matcher.left().node();
Node* left_right = left_matcher.right().node();
if (left_matcher.HasIndexInput() && left_left->OwnedBy(left)) {
- if (left_matcher.right().HasValue()) {
+ if (left_matcher.right().HasResolvedValue()) {
// ((S + D) + B)
index = left_matcher.IndexInput();
scale = left_matcher.scale();
@@ -620,7 +628,7 @@ struct BaseWithIndexAndDisplacementMatcher {
power_of_two_plus_one = left_matcher.power_of_two_plus_one();
displacement = left_right;
base = right;
- } else if (m.right().HasValue()) {
+ } else if (m.right().HasResolvedValue()) {
if (left->OwnedBy(node)) {
// ((S + B) + D)
index = left_matcher.IndexInput();
@@ -640,12 +648,12 @@ struct BaseWithIndexAndDisplacementMatcher {
base = right;
}
} else {
- if (left_matcher.right().HasValue()) {
+ if (left_matcher.right().HasResolvedValue()) {
// ((B + D) + B)
index = left_left;
displacement = left_right;
base = right;
- } else if (m.right().HasValue()) {
+ } else if (m.right().HasResolvedValue()) {
if (left->OwnedBy(node)) {
// ((B + B) + D)
index = left_left;
@@ -663,7 +671,7 @@ struct BaseWithIndexAndDisplacementMatcher {
}
}
} else {
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
// (B + D)
base = left;
displacement = right;
diff --git a/deps/v8/src/compiler/node-origin-table.h b/deps/v8/src/compiler/node-origin-table.h
index 4bb66a769d..c45e235a6e 100644
--- a/deps/v8/src/compiler/node-origin-table.h
+++ b/deps/v8/src/compiler/node-origin-table.h
@@ -82,10 +82,12 @@ class V8_EXPORT_PRIVATE NodeOriginTable final
if (origins_) origins_->current_origin_ = prev_origin_;
}
+ Scope(const Scope&) = delete;
+ Scope& operator=(const Scope&) = delete;
+
private:
NodeOriginTable* const origins_;
NodeOrigin prev_origin_;
- DISALLOW_COPY_AND_ASSIGN(Scope);
};
class PhaseScope final {
@@ -103,13 +105,17 @@ class V8_EXPORT_PRIVATE NodeOriginTable final
if (origins_) origins_->current_phase_name_ = prev_phase_name_;
}
+ PhaseScope(const PhaseScope&) = delete;
+ PhaseScope& operator=(const PhaseScope&) = delete;
+
private:
NodeOriginTable* const origins_;
const char* prev_phase_name_;
- DISALLOW_COPY_AND_ASSIGN(PhaseScope);
};
explicit NodeOriginTable(Graph* graph);
+ NodeOriginTable(const NodeOriginTable&) = delete;
+ NodeOriginTable& operator=(const NodeOriginTable&) = delete;
void AddDecorator();
void RemoveDecorator();
@@ -130,8 +136,6 @@ class V8_EXPORT_PRIVATE NodeOriginTable final
const char* current_phase_name_;
NodeAuxData<NodeOrigin, NodeOrigin::Unknown> table_;
-
- DISALLOW_COPY_AND_ASSIGN(NodeOriginTable);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 3b78872437..bc25b83d92 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -328,7 +328,7 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker,
receiver->opcode() == IrOpcode::kJSCreateArray);
HeapObjectMatcher mtarget(GetValueInput(receiver, 0));
HeapObjectMatcher mnewtarget(GetValueInput(receiver, 1));
- if (mtarget.HasValue() && mnewtarget.HasValue() &&
+ if (mtarget.HasResolvedValue() && mnewtarget.HasResolvedValue() &&
mnewtarget.Ref(broker).IsJSFunction()) {
ObjectRef target = mtarget.Ref(broker);
JSFunctionRef newtarget = mnewtarget.Ref(broker).AsJSFunction();
@@ -349,11 +349,11 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker,
}
// static
-NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
+NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
JSHeapBroker* broker, Node* receiver, Node* effect,
ZoneHandleSet<Map>* maps_return) {
HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
HeapObjectRef receiver = m.Ref(broker);
// We don't use ICs for the Array.prototype and the Object.prototype
// because the runtime has to be able to intercept them properly, so
@@ -368,11 +368,11 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
// The {receiver_map} is only reliable when we install a stability
// code dependency.
*maps_return = ZoneHandleSet<Map>(receiver.map().object());
- return kUnreliableReceiverMaps;
+ return kUnreliableMaps;
}
}
}
- InferReceiverMapsResult result = kReliableReceiverMaps;
+ InferMapsResult result = kReliableMaps;
while (true) {
switch (effect->opcode()) {
case IrOpcode::kMapGuard: {
@@ -399,9 +399,9 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
return result;
}
// We reached the allocation of the {receiver}.
- return kNoReceiverMaps;
+ return kNoMaps;
}
- result = kUnreliableReceiverMaps; // JSCreate can have side-effect.
+ result = kUnreliableMaps; // JSCreate can have side-effect.
break;
}
case IrOpcode::kJSCreatePromise: {
@@ -423,14 +423,14 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
if (IsSame(receiver, object)) {
Node* const value = GetValueInput(effect, 1);
HeapObjectMatcher m(value);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
*maps_return = ZoneHandleSet<Map>(m.Ref(broker).AsMap().object());
return result;
}
}
// Without alias analysis we cannot tell whether this
// StoreField[map] affects {receiver} or not.
- result = kUnreliableReceiverMaps;
+ result = kUnreliableMaps;
}
break;
}
@@ -453,25 +453,25 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
if (control->opcode() != IrOpcode::kLoop) {
DCHECK(control->opcode() == IrOpcode::kDead ||
control->opcode() == IrOpcode::kMerge);
- return kNoReceiverMaps;
+ return kNoMaps;
}
// Continue search for receiver map outside the loop. Since operations
// inside the loop may change the map, the result is unreliable.
effect = GetEffectInput(effect, 0);
- result = kUnreliableReceiverMaps;
+ result = kUnreliableMaps;
continue;
}
default: {
DCHECK_EQ(1, effect->op()->EffectOutputCount());
if (effect->op()->EffectInputCount() != 1) {
// Didn't find any appropriate CheckMaps node.
- return kNoReceiverMaps;
+ return kNoMaps;
}
if (!effect->op()->HasProperty(Operator::kNoWrite)) {
// Without alias/escape analysis we cannot tell whether this
// {effect} affects {receiver} or not.
- result = kUnreliableReceiverMaps;
+ result = kUnreliableMaps;
}
break;
}
@@ -479,7 +479,7 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
// Stop walking the effect chain once we hit the definition of
// the {receiver} along the {effect}s.
- if (IsSame(receiver, effect)) return kNoReceiverMaps;
+ if (IsSame(receiver, effect)) return kNoMaps;
// Continue with the next {effect}.
DCHECK_EQ(1, effect->op()->EffectInputCount());
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 5b31f15d48..059db4f5cb 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -121,6 +121,21 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// the IfSuccess projection of {node} if present and {node} itself otherwise.
static Node* FindSuccessfulControlProjection(Node* node);
+ // Returns whether the node acts as the identity function on a value
+ // input. The input that is passed through is returned via {out_value}.
+ static bool IsValueIdentity(Node* node, Node** out_value) {
+ switch (node->opcode()) {
+ case IrOpcode::kTypeGuard:
+ *out_value = GetValueInput(node, 0);
+ return true;
+ case IrOpcode::kFoldConstant:
+ *out_value = GetValueInput(node, 1);
+ return true;
+ default:
+ return false;
+ }
+ }
+
// ---------------------------------------------------------------------------
// Miscellaneous mutators.
@@ -188,15 +203,15 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// Walks up the {effect} chain to find a witness that provides map
// information about the {receiver}. Can look through potentially
// side effecting nodes.
- enum InferReceiverMapsResult {
- kNoReceiverMaps, // No receiver maps inferred.
- kReliableReceiverMaps, // Receiver maps can be trusted.
- kUnreliableReceiverMaps // Receiver maps might have changed (side-effect).
+ enum InferMapsResult {
+ kNoMaps, // No maps inferred.
+ kReliableMaps, // Maps can be trusted.
+ kUnreliableMaps // Maps might have changed (side-effect).
};
- // DO NOT USE InferReceiverMapsUnsafe IN NEW CODE. Use MapInference instead.
- static InferReceiverMapsResult InferReceiverMapsUnsafe(
- JSHeapBroker* broker, Node* receiver, Node* effect,
- ZoneHandleSet<Map>* maps_return);
+ // DO NOT USE InferMapsUnsafe IN NEW CODE. Use MapInference instead.
+ static InferMapsResult InferMapsUnsafe(JSHeapBroker* broker, Node* object,
+ Node* effect,
+ ZoneHandleSet<Map>* maps);
// Return the initial map of the new-target if the allocation can be inlined.
static base::Optional<MapRef> GetJSCreateMap(JSHeapBroker* broker,
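
A hedged sketch of how a caller might interpret the renamed InferMapsResult states above: kReliableMaps can be consumed directly, kUnreliableMaps needs a guard because a side effect may have changed the map, and kNoMaps means nothing was inferred. The enum is redeclared locally for the example, and the guard policy is an assumption drawn from the enum comments rather than from MapInference itself.

enum InferMapsResult { kNoMaps, kReliableMaps, kUnreliableMaps };

inline bool MapsUsableWithoutGuard(InferMapsResult result) {
  // Only reliable maps may be used as-is; unreliable or absent maps need a
  // dynamic map check or a stability dependency before specializing.
  return result == kReliableMaps;
}
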
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index b9f1c3c844..8525fa0b01 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -218,9 +218,9 @@ void Node::InsertInputs(Zone* zone, int index, int count) {
DCHECK_LT(0, count);
DCHECK_LT(index, InputCount());
for (int i = 0; i < count; i++) {
- AppendInput(zone, InputAt(Max(InputCount() - count, 0)));
+ AppendInput(zone, InputAt(std::max(InputCount() - count, 0)));
}
- for (int i = InputCount() - count - 1; i >= Max(index, count); --i) {
+ for (int i = InputCount() - count - 1; i >= std::max(index, count); --i) {
ReplaceInput(i, InputAt(i - count));
}
for (int i = 0; i < count; i++) {
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 1936f06457..823bee4597 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -236,6 +236,8 @@ class V8_EXPORT_PRIVATE Node final {
// a node exceeds the maximum inline capacity.
Node(NodeId id, const Operator* op, int inline_count, int inline_capacity);
+ Node(const Node&) = delete;
+ Node& operator=(const Node&) = delete;
inline Address inputs_location() const;
@@ -300,8 +302,6 @@ class V8_EXPORT_PRIVATE Node final {
friend class Edge;
friend class NodeMarkerBase;
friend class NodeProperties;
-
- DISALLOW_COPY_AND_ASSIGN(Node);
};
Address Node::inputs_location() const {
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index f1faeec936..5027f734d4 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -819,22 +819,23 @@
V(I64x2ReplaceLane) \
V(I64x2ReplaceLaneI32Pair) \
V(I64x2Neg) \
+ V(I64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High) \
+ V(I64x2BitMask) \
V(I64x2Shl) \
V(I64x2ShrS) \
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Mul) \
- V(I64x2MinS) \
- V(I64x2MaxS) \
V(I64x2Eq) \
- V(I64x2Ne) \
- V(I64x2GtS) \
- V(I64x2GeS) \
V(I64x2ShrU) \
- V(I64x2MinU) \
- V(I64x2MaxU) \
- V(I64x2GtU) \
- V(I64x2GeU) \
+ V(I64x2ExtMulLowI32x4S) \
+ V(I64x2ExtMulHighI32x4S) \
+ V(I64x2ExtMulLowI32x4U) \
+ V(I64x2ExtMulHighI32x4U) \
+ V(I64x2SignSelect) \
V(I32x4Splat) \
V(I32x4ExtractLane) \
V(I32x4ReplaceLane) \
@@ -869,6 +870,13 @@
V(I32x4Abs) \
V(I32x4BitMask) \
V(I32x4DotI16x8S) \
+ V(I32x4ExtMulLowI16x8S) \
+ V(I32x4ExtMulHighI16x8S) \
+ V(I32x4ExtMulLowI16x8U) \
+ V(I32x4ExtMulHighI16x8U) \
+ V(I32x4SignSelect) \
+ V(I32x4ExtAddPairwiseI16x8S) \
+ V(I32x4ExtAddPairwiseI16x8U) \
V(I16x8Splat) \
V(I16x8ExtractLaneU) \
V(I16x8ExtractLaneS) \
@@ -880,10 +888,10 @@
V(I16x8ShrS) \
V(I16x8SConvertI32x4) \
V(I16x8Add) \
- V(I16x8AddSaturateS) \
+ V(I16x8AddSatS) \
V(I16x8AddHoriz) \
V(I16x8Sub) \
- V(I16x8SubSaturateS) \
+ V(I16x8SubSatS) \
V(I16x8Mul) \
V(I16x8MinS) \
V(I16x8MaxS) \
@@ -897,8 +905,8 @@
V(I16x8UConvertI8x16High) \
V(I16x8ShrU) \
V(I16x8UConvertI32x4) \
- V(I16x8AddSaturateU) \
- V(I16x8SubSaturateU) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
V(I16x8MinU) \
V(I16x8MaxU) \
V(I16x8LtU) \
@@ -906,8 +914,16 @@
V(I16x8GtU) \
V(I16x8GeU) \
V(I16x8RoundingAverageU) \
+ V(I16x8Q15MulRSatS) \
V(I16x8Abs) \
V(I16x8BitMask) \
+ V(I16x8ExtMulLowI8x16S) \
+ V(I16x8ExtMulHighI8x16S) \
+ V(I16x8ExtMulLowI8x16U) \
+ V(I16x8ExtMulHighI8x16U) \
+ V(I16x8SignSelect) \
+ V(I16x8ExtAddPairwiseI8x16S) \
+ V(I16x8ExtAddPairwiseI8x16U) \
V(I8x16Splat) \
V(I8x16ExtractLaneU) \
V(I8x16ExtractLaneS) \
@@ -917,9 +933,9 @@
V(I8x16Shl) \
V(I8x16ShrS) \
V(I8x16Add) \
- V(I8x16AddSaturateS) \
+ V(I8x16AddSatS) \
V(I8x16Sub) \
- V(I8x16SubSaturateS) \
+ V(I8x16SubSatS) \
V(I8x16Mul) \
V(I8x16MinS) \
V(I8x16MaxS) \
@@ -930,8 +946,8 @@
V(I8x16GtS) \
V(I8x16GeS) \
V(I8x16UConvertI16x8) \
- V(I8x16AddSaturateU) \
- V(I8x16SubSaturateU) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
V(I8x16ShrU) \
V(I8x16MinU) \
V(I8x16MaxU) \
@@ -940,8 +956,10 @@
V(I8x16GtU) \
V(I8x16GeU) \
V(I8x16RoundingAverageU) \
+ V(I8x16Popcnt) \
V(I8x16Abs) \
V(I8x16BitMask) \
+ V(I8x16SignSelect) \
V(S128Load) \
V(S128Store) \
V(S128Zero) \
@@ -954,15 +972,15 @@
V(S128AndNot) \
V(I8x16Swizzle) \
V(I8x16Shuffle) \
- V(V64x2AnyTrue) \
- V(V64x2AllTrue) \
V(V32x4AnyTrue) \
V(V32x4AllTrue) \
V(V16x8AnyTrue) \
V(V16x8AllTrue) \
V(V8x16AnyTrue) \
V(V8x16AllTrue) \
- V(LoadTransform)
+ V(LoadTransform) \
+ V(LoadLane) \
+ V(StoreLane)
#define VALUE_OP_LIST(V) \
COMMON_OP_LIST(V) \
@@ -1094,12 +1112,15 @@ class V8_EXPORT_PRIVATE IrOpcode {
case kJSCreateLiteralArray:
case kJSCreateLiteralObject:
case kJSCreateLiteralRegExp:
+ case kJSForInNext:
+ case kJSForInPrepare:
case kJSGetIterator:
case kJSGetTemplateObject:
case kJSHasProperty:
case kJSInstanceOf:
case kJSLoadGlobal:
case kJSLoadNamed:
+ case kJSLoadNamedFromSuper:
case kJSLoadProperty:
case kJSStoreDataPropertyInLiteral:
case kJSStoreGlobal:
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index c77249f621..a8e29416b5 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -193,16 +193,17 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSCloneObject:
// Property access operations
+ case IrOpcode::kJSDeleteProperty:
+ case IrOpcode::kJSLoadGlobal:
case IrOpcode::kJSLoadNamed:
case IrOpcode::kJSLoadNamedFromSuper:
- case IrOpcode::kJSStoreNamed:
case IrOpcode::kJSLoadProperty:
- case IrOpcode::kJSStoreProperty:
- case IrOpcode::kJSLoadGlobal:
+ case IrOpcode::kJSStoreDataPropertyInLiteral:
+ case IrOpcode::kJSStoreInArrayLiteral:
case IrOpcode::kJSStoreGlobal:
+ case IrOpcode::kJSStoreNamed:
case IrOpcode::kJSStoreNamedOwn:
- case IrOpcode::kJSStoreDataPropertyInLiteral:
- case IrOpcode::kJSDeleteProperty:
+ case IrOpcode::kJSStoreProperty:
// Conversions
case IrOpcode::kJSToLength:
diff --git a/deps/v8/src/compiler/operator-properties.h b/deps/v8/src/compiler/operator-properties.h
index 47db81df98..e566a58a02 100644
--- a/deps/v8/src/compiler/operator-properties.h
+++ b/deps/v8/src/compiler/operator-properties.h
@@ -17,6 +17,9 @@ class Operator;
class V8_EXPORT_PRIVATE OperatorProperties final {
public:
+ OperatorProperties(const OperatorProperties&) = delete;
+ OperatorProperties& operator=(const OperatorProperties&) = delete;
+
static bool HasContextInput(const Operator* op);
static int GetContextInputCount(const Operator* op) {
return HasContextInput(op) ? 1 : 0;
@@ -32,9 +35,6 @@ class V8_EXPORT_PRIVATE OperatorProperties final {
static int GetTotalInputCount(const Operator* op);
static bool IsBasicBlockBegin(const Operator* op);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(OperatorProperties);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index 7227c92cd8..3239eb0269 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -65,6 +65,8 @@ class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
Operator(Opcode opcode, Properties properties, const char* mnemonic,
size_t value_in, size_t effect_in, size_t control_in,
size_t value_out, size_t effect_out, size_t control_out);
+ Operator(const Operator&) = delete;
+ Operator& operator=(const Operator&) = delete;
// A small integer unique to all instances of a particular kind of operator,
// useful for quick matching for specific kinds of operators. For fast access
@@ -141,8 +143,6 @@ class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
uint32_t value_out_;
uint8_t effect_out_;
uint32_t control_out_;
-
- DISALLOW_COPY_AND_ASSIGN(Operator);
};
DEFINE_OPERATORS_FOR_FLAGS(Operator::Properties)
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index c034183a75..330c523521 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -23,6 +23,8 @@ class PipelineStatistics : public Malloced {
PipelineStatistics(OptimizedCompilationInfo* info,
CompilationStatistics* turbo_stats, ZoneStats* zone_stats);
~PipelineStatistics();
+ PipelineStatistics(const PipelineStatistics&) = delete;
+ PipelineStatistics& operator=(const PipelineStatistics&) = delete;
void BeginPhaseKind(const char* phase_kind_name);
void EndPhaseKind();
@@ -35,6 +37,8 @@ class PipelineStatistics : public Malloced {
class CommonStats {
public:
CommonStats() : outer_zone_initial_size_(0) {}
+ CommonStats(const CommonStats&) = delete;
+ CommonStats& operator=(const CommonStats&) = delete;
void Begin(PipelineStatistics* pipeline_stats);
void End(PipelineStatistics* pipeline_stats,
@@ -44,9 +48,6 @@ class PipelineStatistics : public Malloced {
base::ElapsedTimer timer_;
size_t outer_zone_initial_size_;
size_t allocated_bytes_at_start_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CommonStats);
};
bool InPhaseKind() { return !!phase_kind_stats_.scope_; }
@@ -71,8 +72,6 @@ class PipelineStatistics : public Malloced {
// Stats for phase.
const char* phase_name_;
CommonStats phase_stats_;
-
- DISALLOW_COPY_AND_ASSIGN(PipelineStatistics);
};
@@ -85,11 +84,11 @@ class PhaseScope {
~PhaseScope() {
if (pipeline_stats_ != nullptr) pipeline_stats_->EndPhase();
}
+ PhaseScope(const PhaseScope&) = delete;
+ PhaseScope& operator=(const PhaseScope&) = delete;
private:
PipelineStatistics* const pipeline_stats_;
-
- DISALLOW_COPY_AND_ASSIGN(PhaseScope);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 7b99d07b6b..1023c6bb68 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -83,6 +83,7 @@
#include "src/diagnostics/code-tracer.h"
#include "src/diagnostics/disassembler.h"
#include "src/execution/isolate-inl.h"
+#include "src/heap/local-heap.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/shared-function-info.h"
@@ -151,9 +152,9 @@ class PipelineData {
instruction_zone_(instruction_zone_scope_.zone()),
codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
- broker_(new JSHeapBroker(
- isolate_, info_->zone(), info_->trace_heap_broker(),
- is_concurrent_inlining, info->IsNativeContextIndependent())),
+ broker_(new JSHeapBroker(isolate_, info_->zone(),
+ info_->trace_heap_broker(),
+ is_concurrent_inlining, info->code_kind())),
register_allocation_zone_scope_(zone_stats_,
kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
@@ -288,6 +289,9 @@ class PipelineData {
DeleteGraphZone();
}
+ PipelineData(const PipelineData&) = delete;
+ PipelineData& operator=(const PipelineData&) = delete;
+
Isolate* isolate() const { return isolate_; }
AccountingAllocator* allocator() const { return allocator_; }
OptimizedCompilationInfo* info() const { return info_; }
@@ -486,6 +490,7 @@ class PipelineData {
call_descriptor->CalculateFixedFrameSize(info()->code_kind());
}
frame_ = codegen_zone()->New<Frame>(fixed_frame_size);
+ if (osr_helper_.has_value()) osr_helper()->SetupFrame(frame());
}
void InitializeTopTierRegisterAllocationData(
@@ -634,8 +639,6 @@ class PipelineData {
RuntimeCallStats* runtime_call_stats_ = nullptr;
const ProfileDataFromFile* profile_data_ = nullptr;
-
- DISALLOW_COPY_AND_ASSIGN(PipelineData);
};
class PipelineImpl final {
@@ -701,6 +704,8 @@ class SourcePositionWrapper final : public Reducer {
SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
: reducer_(reducer), table_(table) {}
~SourcePositionWrapper() final = default;
+ SourcePositionWrapper(const SourcePositionWrapper&) = delete;
+ SourcePositionWrapper& operator=(const SourcePositionWrapper&) = delete;
const char* reducer_name() const override { return reducer_->reducer_name(); }
@@ -715,8 +720,6 @@ class SourcePositionWrapper final : public Reducer {
private:
Reducer* const reducer_;
SourcePositionTable* const table_;
-
- DISALLOW_COPY_AND_ASSIGN(SourcePositionWrapper);
};
class NodeOriginsWrapper final : public Reducer {
@@ -724,6 +727,8 @@ class NodeOriginsWrapper final : public Reducer {
NodeOriginsWrapper(Reducer* reducer, NodeOriginTable* table)
: reducer_(reducer), table_(table) {}
~NodeOriginsWrapper() final = default;
+ NodeOriginsWrapper(const NodeOriginsWrapper&) = delete;
+ NodeOriginsWrapper& operator=(const NodeOriginsWrapper&) = delete;
const char* reducer_name() const override { return reducer_->reducer_name(); }
@@ -737,8 +742,6 @@ class NodeOriginsWrapper final : public Reducer {
private:
Reducer* const reducer_;
NodeOriginTable* const table_;
-
- DISALLOW_COPY_AND_ASSIGN(NodeOriginsWrapper);
};
class PipelineRunScope {
@@ -764,18 +767,21 @@ class PipelineRunScope {
RuntimeCallTimerScope runtime_call_timer_scope;
};
-// LocalHeapScope encapsulates the liveness of the brokers's LocalHeap.
-class LocalHeapScope {
+// LocalIsolateScope encapsulates the phase where persistent handles are
+// attached to the LocalHeap inside {local_isolate}.
+class LocalIsolateScope {
public:
- explicit LocalHeapScope(JSHeapBroker* broker, OptimizedCompilationInfo* info)
+ explicit LocalIsolateScope(JSHeapBroker* broker,
+ OptimizedCompilationInfo* info,
+ LocalIsolate* local_isolate)
: broker_(broker), info_(info) {
- broker_->InitializeLocalHeap(info_);
- info_->tick_counter().AttachLocalHeap(broker_->local_heap());
+ broker_->AttachLocalIsolate(info_, local_isolate);
+ info_->tick_counter().AttachLocalHeap(local_isolate->heap());
}
- ~LocalHeapScope() {
+ ~LocalIsolateScope() {
info_->tick_counter().DetachLocalHeap();
- broker_->TearDownLocalHeap(info_);
+ broker_->DetachLocalIsolate(info_);
}
private:
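
A generic sketch of the RAII shape LocalIsolateScope follows above: attach in the constructor, detach in the destructor, so the broker's local isolate is released even on early returns out of ExecuteJobImpl. The Broker type below is a placeholder, not the real JSHeapBroker.

struct Broker {
  void Attach() {}  // stands in for AttachLocalIsolate(info, local_isolate)
  void Detach() {}  // stands in for DetachLocalIsolate(info)
};

class ScopedAttachment {
 public:
  explicit ScopedAttachment(Broker* broker) : broker_(broker) { broker_->Attach(); }
  ~ScopedAttachment() { broker_->Detach(); }
  ScopedAttachment(const ScopedAttachment&) = delete;
  ScopedAttachment& operator=(const ScopedAttachment&) = delete;

 private:
  Broker* const broker_;
};
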
@@ -1026,10 +1032,13 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
Handle<JSFunction> function, BailoutId osr_offset,
JavaScriptFrame* osr_frame, CodeKind code_kind);
~PipelineCompilationJob() final;
+ PipelineCompilationJob(const PipelineCompilationJob&) = delete;
+ PipelineCompilationJob& operator=(const PipelineCompilationJob&) = delete;
protected:
Status PrepareJobImpl(Isolate* isolate) final;
- Status ExecuteJobImpl(RuntimeCallStats* stats) final;
+ Status ExecuteJobImpl(RuntimeCallStats* stats,
+ LocalIsolate* local_isolate) final;
Status FinalizeJobImpl(Isolate* isolate) final;
// Registers weak object to optimized code dependencies.
@@ -1045,8 +1054,6 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
PipelineData data_;
PipelineImpl pipeline_;
Linkage* linkage_;
-
- DISALLOW_COPY_AND_ASSIGN(PipelineCompilationJob);
};
PipelineCompilationJob::PipelineCompilationJob(
@@ -1110,7 +1117,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (FLAG_turbo_loop_peeling) {
compilation_info()->set_loop_peeling();
}
- if (FLAG_turbo_inlining &&
+ if (FLAG_turbo_inlining && !compilation_info()->IsTurboprop() &&
!compilation_info()->IsNativeContextIndependent()) {
compilation_info()->set_inlining();
}
@@ -1139,7 +1146,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (compilation_info()->closure()->raw_feedback_cell().map() ==
ReadOnlyRoots(isolate).one_closure_cell_map() &&
!compilation_info()->is_osr() &&
- !compilation_info()->IsNativeContextIndependent()) {
+ !compilation_info()->IsNativeContextIndependent() &&
+ !compilation_info()->IsTurboprop()) {
compilation_info()->set_function_context_specializing();
data_.ChooseSpecializationContext();
}
@@ -1157,11 +1165,6 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (compilation_info()->is_osr()) data_.InitializeOsrHelper();
- // Make sure that we have generated the deopt entries code. This is in order
- // to avoid triggering the generation of deopt entries later during code
- // assembly.
- Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
-
pipeline_.Serialize();
if (!data_.broker()->is_concurrent_inlining()) {
@@ -1175,30 +1178,28 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
}
PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl(
- RuntimeCallStats* stats) {
+ RuntimeCallStats* stats, LocalIsolate* local_isolate) {
// Ensure that the RuntimeCallStats table is only available during execution
// and not during finalization as that might be on a different thread.
PipelineJobScope scope(&data_, stats);
- {
- LocalHeapScope local_heap_scope(data_.broker(), data_.info());
- if (data_.broker()->is_concurrent_inlining()) {
- if (!pipeline_.CreateGraph()) {
- return AbortOptimization(BailoutReason::kGraphBuildingFailed);
- }
- }
-
- // We selectively Unpark inside OptimizeGraph*.
- ParkedScope parked_scope(data_.broker()->local_heap());
+ LocalIsolateScope local_isolate_scope(data_.broker(), data_.info(),
+ local_isolate);
- bool success;
- if (FLAG_turboprop) {
- success = pipeline_.OptimizeGraphForMidTier(linkage_);
- } else {
- success = pipeline_.OptimizeGraph(linkage_);
+ if (data_.broker()->is_concurrent_inlining()) {
+ if (!pipeline_.CreateGraph()) {
+ return AbortOptimization(BailoutReason::kGraphBuildingFailed);
}
- if (!success) return FAILED;
}
+ // We selectively Unpark inside OptimizeGraph*.
+ bool success;
+ if (compilation_info_.code_kind() == CodeKind::TURBOPROP) {
+ success = pipeline_.OptimizeGraphForMidTier(linkage_);
+ } else {
+ success = pipeline_.OptimizeGraph(linkage_);
+ }
+ if (!success) return FAILED;
+
pipeline_.AssembleCode(linkage_);
return SUCCEEDED;
@@ -1279,9 +1280,14 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
pipeline_(&data_),
wasm_engine_(wasm_engine) {}
+ WasmHeapStubCompilationJob(const WasmHeapStubCompilationJob&) = delete;
+ WasmHeapStubCompilationJob& operator=(const WasmHeapStubCompilationJob&) =
+ delete;
+
protected:
Status PrepareJobImpl(Isolate* isolate) final;
- Status ExecuteJobImpl(RuntimeCallStats* stats) final;
+ Status ExecuteJobImpl(RuntimeCallStats* stats,
+ LocalIsolate* local_isolate) final;
Status FinalizeJobImpl(Isolate* isolate) final;
private:
@@ -1294,8 +1300,6 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
PipelineData data_;
PipelineImpl pipeline_;
wasm::WasmEngine* wasm_engine_;
-
- DISALLOW_COPY_AND_ASSIGN(WasmHeapStubCompilationJob);
};
// static
@@ -1316,7 +1320,7 @@ CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
}
CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
- RuntimeCallStats* stats) {
+ RuntimeCallStats* stats, LocalIsolate* local_isolate) {
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics.reset(new PipelineStatistics(
@@ -1405,10 +1409,11 @@ struct GraphBuilderPhase {
JSFunctionRef closure(data->broker(), data->info()->closure());
CallFrequency frequency(1.0f);
BuildGraphFromBytecode(
- data->broker(), temp_zone, closure.shared(), closure.feedback_vector(),
- data->info()->osr_offset(), data->jsgraph(), frequency,
- data->source_positions(), SourcePosition::kNotInlined,
- data->info()->code_kind(), flags, &data->info()->tick_counter());
+ data->broker(), temp_zone, closure.shared(),
+ closure.raw_feedback_cell(), data->info()->osr_offset(),
+ data->jsgraph(), frequency, data->source_positions(),
+ SourcePosition::kNotInlined, data->info()->code_kind(), flags,
+ &data->info()->tick_counter());
}
};
@@ -1667,11 +1672,11 @@ struct TypeAssertionsPhase {
struct SimplifiedLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(SimplifiedLowering)
- void Run(PipelineData* data, Zone* temp_zone) {
+ void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
data->source_positions(), data->node_origins(),
data->info()->GetPoisoningMitigationLevel(),
- &data->info()->tick_counter());
+ &data->info()->tick_counter(), linkage);
// RepresentationChanger accesses the heap.
UnparkedScopeIfNeeded scope(data->broker());
@@ -1691,6 +1696,9 @@ struct LoopPeelingPhase {
LoopTree* loop_tree = LoopFinder::BuildLoopTree(
data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone);
+ // We call the typer inside of PeelInnerLoopsOfTree which inspects heap
+ // objects, so we need to unpark the local heap.
+ UnparkedScopeIfNeeded scope(data->broker());
LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
data->source_positions(), data->node_origins())
.PeelInnerLoopsOfTree();
@@ -1792,7 +1800,8 @@ struct EffectControlLinearizationPhase {
// - introduce effect phis and rewire effects to get SSA again.
LinearizeEffectControl(data->jsgraph(), schedule, temp_zone,
data->source_positions(), data->node_origins(),
- mask_array_index, MaintainSchedule::kDiscard);
+ mask_array_index, MaintainSchedule::kDiscard,
+ data->broker());
}
{
// The {EffectControlLinearizer} might leave {Dead} nodes behind, so we
@@ -1967,7 +1976,8 @@ struct ScheduledEffectControlLinearizationPhase {
// - introduce effect phis and rewire effects to get SSA again.
LinearizeEffectControl(data->jsgraph(), data->schedule(), temp_zone,
data->source_positions(), data->node_origins(),
- mask_array_index, MaintainSchedule::kMaintain);
+ mask_array_index, MaintainSchedule::kMaintain,
+ data->broker());
// TODO(rmcilroy) Avoid having to rebuild rpo_order on schedule each time.
Scheduler::ComputeSpecialRPO(temp_zone, data->schedule());
@@ -2128,7 +2138,7 @@ struct InstructionSelectionPhase {
data->info()->switch_jump_table()
? InstructionSelector::kEnableSwitchJumpTable
: InstructionSelector::kDisableSwitchJumpTable,
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->address_of_max_unoptimized_frame_height(),
data->address_of_max_pushed_argument_count(),
data->info()->source_positions()
@@ -2469,6 +2479,7 @@ void PipelineImpl::Serialize() {
bool PipelineImpl::CreateGraph() {
PipelineData* data = this->data_;
+ UnparkedScopeIfNeeded unparked_scope(data->broker());
data->BeginPhaseKind("V8.TFGraphCreation");
@@ -2557,7 +2568,7 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
// Perform simplified lowering. This has to run w/o the Typer decorator,
// because we cannot compute meaningful types anyways, and the computed types
// might even conflict with the representation/truncation logic.
- Run<SimplifiedLoweringPhase>();
+ Run<SimplifiedLoweringPhase>(linkage);
RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
// From now on it is invalid to look at types on the nodes, because the types
@@ -2578,6 +2589,8 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
data->BeginPhaseKind("V8.TFBlockBuilding");
+ data->InitializeFrameData(linkage->GetIncomingDescriptor());
+
// Run early optimization pass.
Run<EarlyOptimizationPhase>();
RunPrintAndVerify(EarlyOptimizationPhase::phase_name(), true);
@@ -2650,7 +2663,7 @@ bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
// Perform simplified lowering. This has to run w/o the Typer decorator,
// because we cannot compute meaningful types anyways, and the computed types
// might even conflict with the representation/truncation logic.
- Run<SimplifiedLoweringPhase>();
+ Run<SimplifiedLoweringPhase>(linkage);
RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
// From now on it is invalid to look at types on the nodes, because the types
@@ -2671,6 +2684,8 @@ bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
data->BeginPhaseKind("V8.TFBlockBuilding");
+ data->InitializeFrameData(linkage->GetIncomingDescriptor());
+
ComputeScheduledGraph();
Run<ScheduledEffectControlLinearizationPhase>();
@@ -3000,7 +3015,6 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
PipelineImpl pipeline(&data);
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
- Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
{
CompilationHandleScope compilation_scope(isolate, info);
@@ -3008,7 +3022,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
info->ReopenHandlesInNewHandleScope(isolate);
pipeline.Serialize();
// Emulating the proper pipeline, we call CreateGraph in different places
- // (i.e before or after creating a LocalHeapScope) depending on
+ // (i.e before or after creating a LocalIsolateScope) depending on
// is_concurrent_inlining.
if (!data.broker()->is_concurrent_inlining()) {
if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
@@ -3016,13 +3030,15 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
}
{
- LocalHeapScope local_heap_scope(data.broker(), info);
+ LocalIsolate local_isolate(isolate, ThreadKind::kMain);
+ LocalIsolateScope local_isolate_scope(data.broker(), info, &local_isolate);
if (data.broker()->is_concurrent_inlining()) {
if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
}
// We selectively Unpark inside OptimizeGraph.
- ParkedScope parked_scope(data.broker()->local_heap());
if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
+
+ pipeline.AssembleCode(&linkage);
}
const bool will_retire_broker = out_broker == nullptr;
@@ -3034,7 +3050,6 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
info->DetachPersistentHandles(), info->DetachCanonicalHandles());
}
- pipeline.AssembleCode(&linkage);
Handle<Code> code;
if (pipeline.FinalizeCode(will_retire_broker).ToHandle(&code) &&
pipeline.CommitDependencies(code)) {
@@ -3224,7 +3239,7 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
bool use_mid_tier_register_allocator,
bool run_verifier) {
OptimizedCompilationInfo info(ArrayVector("testing"), sequence->zone(),
- CodeKind::STUB);
+ CodeKind::FOR_TESTING);
ZoneStats zone_stats(sequence->isolate()->allocator());
PipelineData data(&zone_stats, &info, sequence->isolate(), sequence);
data.InitializeFrameData(nullptr);
@@ -3310,7 +3325,11 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
data->InitializeInstructionSequence(call_descriptor);
- data->InitializeFrameData(call_descriptor);
+ // Depending on which code path led us to this function, the frame may or
+ // may not have been initialized. If it hasn't yet, initialize it now.
+ if (!data->frame()) {
+ data->InitializeFrameData(call_descriptor);
+ }
// Select and schedule instructions covering the scheduled graph.
Run<InstructionSelectionPhase>(linkage);
if (data->compilation_failed()) {
@@ -3366,7 +3385,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
config = RegisterConfiguration::Default();
}
- if (FLAG_turboprop_mid_tier_reg_alloc) {
+ if (data->info()->IsTurboprop() && FLAG_turboprop_mid_tier_reg_alloc) {
AllocateRegistersForMidTier(config, call_descriptor, run_verifier);
} else {
AllocateRegistersForTopTier(config, call_descriptor, run_verifier);
@@ -3468,6 +3487,8 @@ void PipelineImpl::AssembleCode(Linkage* linkage,
data->BeginPhaseKind("V8.TFCodeGeneration");
data->InitializeCodeGenerator(linkage, std::move(buffer));
+ UnparkedScopeIfNeeded unparked_scope(data->broker(), FLAG_code_comments);
+
Run<AssembleCodePhase>();
if (data->info()->trace_turbo_json()) {
TurboJsonFile json_of(data->info(), std::ios_base::app);
@@ -3606,7 +3627,6 @@ void PipelineImpl::AllocateRegistersForTopTier(
flags |= RegisterAllocationFlag::kTraceAllocation;
}
data->InitializeTopTierRegisterAllocationData(config, call_descriptor, flags);
- if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());
Run<MeetRegisterConstraintsPhase>();
Run<ResolvePhisPhase>();
@@ -3690,8 +3710,6 @@ void PipelineImpl::AllocateRegistersForMidTier(
data->sequence()->ValidateDeferredBlockEntryPaths();
data->sequence()->ValidateDeferredBlockExitPaths();
#endif
-
- if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());
data->InitializeMidTierRegisterAllocationData(config, call_descriptor);
TraceSequence(info(), data, "before register allocation");
diff --git a/deps/v8/src/compiler/processed-feedback.h b/deps/v8/src/compiler/processed-feedback.h
index 282923e0c3..da3785f35e 100644
--- a/deps/v8/src/compiler/processed-feedback.h
+++ b/deps/v8/src/compiler/processed-feedback.h
@@ -178,19 +178,19 @@ class MinimorphicLoadPropertyAccessFeedback : public ProcessedFeedback {
MinimorphicLoadPropertyAccessFeedback(NameRef const& name,
FeedbackSlotKind slot_kind,
Handle<Object> handler,
- MaybeHandle<Map> maybe_map,
+ ZoneVector<Handle<Map>> const& maps,
bool has_migration_target_maps);
NameRef const& name() const { return name_; }
- bool is_monomorphic() const { return !maybe_map_.is_null(); }
+ bool is_monomorphic() const { return maps_.size() == 1; }
Handle<Object> handler() const { return handler_; }
- MaybeHandle<Map> map() const { return maybe_map_; }
+ ZoneVector<Handle<Map>> const& maps() const { return maps_; }
bool has_migration_target_maps() const { return has_migration_target_maps_; }
private:
NameRef const name_;
Handle<Object> const handler_;
- MaybeHandle<Map> const maybe_map_;
+ ZoneVector<Handle<Map>> const maps_;
bool const has_migration_target_maps_;
};
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index 4235160037..5214f7ad9b 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -82,30 +82,30 @@ bool PropertyAccessBuilder::TryBuildNumberCheck(
}
void PropertyAccessBuilder::BuildCheckMaps(
- Node* receiver, Node** effect, Node* control,
- ZoneVector<Handle<Map>> const& receiver_maps) {
- HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
- MapRef receiver_map = m.Ref(broker()).map();
- if (receiver_map.is_stable()) {
- for (Handle<Map> map : receiver_maps) {
- if (MapRef(broker(), map).equals(receiver_map)) {
- dependencies()->DependOnStableMap(receiver_map);
+ Node* object, Node** effect, Node* control,
+ ZoneVector<Handle<Map>> const& maps) {
+ HeapObjectMatcher m(object);
+ if (m.HasResolvedValue()) {
+ MapRef object_map = m.Ref(broker()).map();
+ if (object_map.is_stable()) {
+ for (Handle<Map> map : maps) {
+ if (MapRef(broker(), map).equals(object_map)) {
+ dependencies()->DependOnStableMap(object_map);
return;
}
}
}
}
- ZoneHandleSet<Map> maps;
+ ZoneHandleSet<Map> map_set;
CheckMapsFlags flags = CheckMapsFlag::kNone;
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- maps.insert(receiver_map.object(), graph()->zone());
- if (receiver_map.is_migration_target()) {
+ for (Handle<Map> map : maps) {
+ MapRef object_map(broker(), map);
+ map_set.insert(object_map.object(), graph()->zone());
+ if (object_map.is_migration_target()) {
flags |= CheckMapsFlag::kTryMigrateInstance;
}
}
- *effect = graph()->NewNode(simplified()->CheckMaps(flags, maps), receiver,
+ *effect = graph()->NewNode(simplified()->CheckMaps(flags, map_set), object,
*effect, control);
}
@@ -124,12 +124,12 @@ Node* PropertyAccessBuilder::BuildCheckValue(Node* receiver, Effect* effect,
}
Node* PropertyAccessBuilder::ResolveHolder(
- PropertyAccessInfo const& access_info, Node* receiver) {
+ PropertyAccessInfo const& access_info, Node* lookup_start_object) {
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
return jsgraph()->Constant(ObjectRef(broker(), holder));
}
- return receiver;
+ return lookup_start_object;
}
MachineRepresentation PropertyAccessBuilder::ConvertRepresentation(
@@ -150,25 +150,27 @@ MachineRepresentation PropertyAccessBuilder::ConvertRepresentation(
Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
NameRef const& name, PropertyAccessInfo const& access_info,
- Node* receiver) {
+ Node* lookup_start_object) {
if (!access_info.IsDataConstant()) return nullptr;
// First, determine if we have a constant holder to load from.
Handle<JSObject> holder;
// If {access_info} has a holder, just use it.
if (!access_info.holder().ToHandle(&holder)) {
- // Otherwise, try to match the {receiver} as a constant.
- HeapObjectMatcher m(receiver);
- if (!m.HasValue() || !m.Ref(broker()).IsJSObject()) return nullptr;
+ // Otherwise, try to match the {lookup_start_object} as a constant.
+ HeapObjectMatcher m(lookup_start_object);
+ if (!m.HasResolvedValue() || !m.Ref(broker()).IsJSObject()) return nullptr;
- // Let us make sure the actual map of the constant receiver is among
- // the maps in {access_info}.
- MapRef receiver_map = m.Ref(broker()).map();
- if (std::find_if(access_info.receiver_maps().begin(),
- access_info.receiver_maps().end(), [&](Handle<Map> map) {
- return MapRef(broker(), map).equals(receiver_map);
- }) == access_info.receiver_maps().end()) {
- // The map of the receiver is not in the feedback, let us bail out.
+ // Let us make sure the actual map of the constant lookup_start_object is
+ // among the maps in {access_info}.
+ MapRef lookup_start_object_map = m.Ref(broker()).map();
+ if (std::find_if(
+ access_info.lookup_start_object_maps().begin(),
+ access_info.lookup_start_object_maps().end(), [&](Handle<Map> map) {
+ return MapRef(broker(), map).equals(lookup_start_object_map);
+ }) == access_info.lookup_start_object_maps().end()) {
+ // The map of the lookup_start_object is not in the feedback, so let us
+ // bail out.
return nullptr;
}
holder = m.Ref(broker()).AsJSObject().object();
@@ -253,7 +255,7 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
Node* PropertyAccessBuilder::BuildMinimorphicLoadDataField(
NameRef const& name, MinimorphicLoadPropertyAccessInfo const& access_info,
- Node* receiver, Node** effect, Node** control) {
+ Node* lookup_start_object, Node** effect, Node** control) {
DCHECK_NULL(dependencies());
MachineRepresentation const field_representation =
ConvertRepresentation(access_info.field_representation());
@@ -268,22 +270,22 @@ Node* PropertyAccessBuilder::BuildMinimorphicLoadDataField(
kFullWriteBarrier,
LoadSensitivity::kCritical,
ConstFieldInfo::None()};
- return BuildLoadDataField(name, receiver, field_access,
+ return BuildLoadDataField(name, lookup_start_object, field_access,
access_info.is_inobject(), effect, control);
}
Node* PropertyAccessBuilder::BuildLoadDataField(
- NameRef const& name, PropertyAccessInfo const& access_info, Node* receiver,
- Node** effect, Node** control) {
+ NameRef const& name, PropertyAccessInfo const& access_info,
+ Node* lookup_start_object, Node** effect, Node** control) {
DCHECK(access_info.IsDataField() || access_info.IsDataConstant());
- if (Node* value =
- TryBuildLoadConstantDataField(name, access_info, receiver)) {
+ if (Node* value = TryBuildLoadConstantDataField(name, access_info,
+ lookup_start_object)) {
return value;
}
MachineRepresentation const field_representation =
ConvertRepresentation(access_info.field_representation());
- Node* storage = ResolveHolder(access_info, receiver);
+ Node* storage = ResolveHolder(access_info, lookup_start_object);
FieldAccess field_access = {
kTaggedBase,
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
index 05436c2635..237f501dbb 100644
--- a/deps/v8/src/compiler/property-access-builder.h
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -45,13 +45,13 @@ class PropertyAccessBuilder {
// TODO(jgruber): Remove the untyped version once all uses are
// updated.
- void BuildCheckMaps(Node* receiver, Node** effect, Node* control,
- ZoneVector<Handle<Map>> const& receiver_maps);
- void BuildCheckMaps(Node* receiver, Effect* effect, Control control,
- ZoneVector<Handle<Map>> const& receiver_maps) {
+ void BuildCheckMaps(Node* object, Node** effect, Node* control,
+ ZoneVector<Handle<Map>> const& maps);
+ void BuildCheckMaps(Node* object, Effect* effect, Control control,
+ ZoneVector<Handle<Map>> const& maps) {
Node* e = *effect;
Node* c = control;
- BuildCheckMaps(receiver, &e, c, receiver_maps);
+ BuildCheckMaps(object, &e, c, maps);
*effect = e;
}
Node* BuildCheckValue(Node* receiver, Effect* effect, Control control,
@@ -61,13 +61,14 @@ class PropertyAccessBuilder {
// properties (without heap-object or map checks).
Node* BuildLoadDataField(NameRef const& name,
PropertyAccessInfo const& access_info,
- Node* receiver, Node** effect, Node** control);
+ Node* lookup_start_object, Node** effect,
+ Node** control);
// Builds the load for data-field access for minimorphic loads that use
// dynamic map checks. These cannot depend on any information from the maps.
Node* BuildMinimorphicLoadDataField(
NameRef const& name, MinimorphicLoadPropertyAccessInfo const& access_info,
- Node* receiver, Node** effect, Node** control);
+ Node* lookup_start_object, Node** effect, Node** control);
static MachineRepresentation ConvertRepresentation(
Representation representation);
@@ -83,10 +84,11 @@ class PropertyAccessBuilder {
Node* TryBuildLoadConstantDataField(NameRef const& name,
PropertyAccessInfo const& access_info,
- Node* receiver);
+ Node* lookup_start_object);
// Returns a node with the holder for the property access described by
// {access_info}.
- Node* ResolveHolder(PropertyAccessInfo const& access_info, Node* receiver);
+ Node* ResolveHolder(PropertyAccessInfo const& access_info,
+ Node* lookup_start_object);
Node* BuildLoadDataField(NameRef const& name, Node* holder,
FieldAccess& field_access, bool is_inobject,
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index fa8aaad5c5..dce807b7ab 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -632,6 +632,21 @@ void RawMachineAssembler::Return(int count, Node* vs[]) {
}
void RawMachineAssembler::PopAndReturn(Node* pop, Node* value) {
+ // PopAndReturn is supposed to be used ONLY in CSA/Torque builtins for
+ // dropping ALL JS arguments that are currently located on the stack.
+ // The check below ensures that there are no directly accessible stack
+ // parameters from the current builtin, which implies that the builtin with
+ // JS calling convention (TFJ) was created with kDontAdaptArgumentsSentinel.
+ // This simplifies the semantics of this instruction, because when directly
+ // accessible stack parameters are present it is impossible to distinguish
+ // the following cases:
+ // 1) the stack parameter is included in the JS arguments (and therefore it
+ //    will be dropped as part of the 'pop' number of arguments),
+ // 2) the stack parameter is NOT included in the JS arguments (and therefore
+ //    it should be dropped IN ADDITION to the 'pop' number of arguments).
+ // Additionally, in order to simplify assembly code, PopAndReturn is also
+ // not allowed in builtins with stub linkage and parameters on the stack.
+ CHECK_EQ(call_descriptor()->StackParameterCount(), 0);
Node* values[] = {pop, value};
Node* ret = MakeNode(common()->Return(1), 2, values);
schedule()->AddReturn(CurrentBlock(), ret);
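The comment above spells out why PopAndReturn requires zero directly accessible stack parameters. As a purely illustrative sketch (plain C++, not V8 API; the function and parameter names below are invented), the return sequence would otherwise have to choose between two incompatible slot counts, and nothing at this point tells it which one is right:

#include <cassert>

// Hypothetical model of the return-time stack adjustment. 'pop' is the dynamic
// JS argument count handed to PopAndReturn; 'declared_stack_params' is the
// builtin's own stack parameter count from its call descriptor.
int SlotsToDrop(int pop, int declared_stack_params, bool params_are_js_args) {
  if (params_are_js_args) {
    // Case 1: the declared parameters are part of the JS arguments and are
    // already covered by 'pop'.
    return pop;
  }
  // Case 2: the declared parameters are extra and must be dropped in addition.
  return pop + declared_stack_params;
}

int main() {
  // Only with zero declared stack parameters do both cases agree, which is
  // exactly what the CHECK above enforces.
  assert(SlotsToDrop(3, 0, true) == SlotsToDrop(3, 0, false));
  assert(SlotsToDrop(3, 2, true) != SlotsToDrop(3, 2, false));
  return 0;
}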
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 3fa57839a0..8509568bf5 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -56,6 +56,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
PoisoningMitigationLevel::kPoisonCriticalOnly);
~RawMachineAssembler() = default;
+ RawMachineAssembler(const RawMachineAssembler&) = delete;
+ RawMachineAssembler& operator=(const RawMachineAssembler&) = delete;
+
Isolate* isolate() const { return isolate_; }
Graph* graph() const { return graph_; }
Zone* zone() const { return graph()->zone(); }
@@ -835,6 +838,15 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return AddNode(machine()->Float64SilenceNaN(), a);
}
+ // SIMD operations.
+ Node* I64x2Splat(Node* a) { return AddNode(machine()->I64x2Splat(), a); }
+ Node* I64x2SplatI32Pair(Node* a, Node* b) {
+ return AddNode(machine()->I64x2SplatI32Pair(), a, b);
+ }
+ Node* I32x4Splat(Node* a) { return AddNode(machine()->I32x4Splat(), a); }
+ Node* I16x8Splat(Node* a) { return AddNode(machine()->I16x8Splat(), a); }
+ Node* I8x16Splat(Node* a) { return AddNode(machine()->I8x16Splat(), a); }
+
// Stack operations.
Node* LoadFramePointer() { return AddNode(machine()->LoadFramePointer()); }
Node* LoadParentFramePointer() {
@@ -1051,8 +1063,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
NodeVector parameters_;
BasicBlock* current_block_;
PoisoningMitigationLevel poisoning_level_;
-
- DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
};
class V8_EXPORT_PRIVATE RawMachineLabel final {
@@ -1062,6 +1072,8 @@ class V8_EXPORT_PRIVATE RawMachineLabel final {
explicit RawMachineLabel(Type type = kNonDeferred)
: deferred_(type == kDeferred) {}
~RawMachineLabel();
+ RawMachineLabel(const RawMachineLabel&) = delete;
+ RawMachineLabel& operator=(const RawMachineLabel&) = delete;
BasicBlock* block() const { return block_; }
@@ -1071,7 +1083,6 @@ class V8_EXPORT_PRIVATE RawMachineLabel final {
bool bound_ = false;
bool deferred_;
friend class RawMachineAssembler;
- DISALLOW_COPY_AND_ASSIGN(RawMachineLabel);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/redundancy-elimination.h b/deps/v8/src/compiler/redundancy-elimination.h
index 5cf9f7ce61..cabdb1b41c 100644
--- a/deps/v8/src/compiler/redundancy-elimination.h
+++ b/deps/v8/src/compiler/redundancy-elimination.h
@@ -15,6 +15,8 @@ class V8_EXPORT_PRIVATE RedundancyElimination final : public AdvancedReducer {
public:
RedundancyElimination(Editor* editor, Zone* zone);
~RedundancyElimination() final;
+ RedundancyElimination(const RedundancyElimination&) = delete;
+ RedundancyElimination& operator=(const RedundancyElimination&) = delete;
const char* reducer_name() const override { return "RedundancyElimination"; }
@@ -73,8 +75,6 @@ class V8_EXPORT_PRIVATE RedundancyElimination final : public AdvancedReducer {
PathChecksForEffectNodes node_checks_;
Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(RedundancyElimination);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 46207a8b4e..2455ea3115 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -674,7 +674,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type output_type,
Node* use_node, UseInfo use_info) {
NumberMatcher m(node);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
// BigInts are not used as number constants.
DCHECK(use_info.type_check() != TypeCheckKind::kBigInt);
switch (use_info.type_check()) {
@@ -682,7 +682,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
case TypeCheckKind::kNumber:
case TypeCheckKind::kNumberOrBoolean:
case TypeCheckKind::kNumberOrOddball:
- return jsgraph()->Float64Constant(m.Value());
+ return jsgraph()->Float64Constant(m.ResolvedValue());
case TypeCheckKind::kBigInt:
case TypeCheckKind::kHeapObject:
case TypeCheckKind::kSigned32:
@@ -1089,7 +1089,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
}
case IrOpcode::kHeapConstant: {
HeapObjectMatcher m(node);
- if (m.HasValue() && m.Ref(broker_).IsBigInt() &&
+ if (m.HasResolvedValue() && m.Ref(broker_).IsBigInt() &&
use_info.truncation().IsUsedAsWord64()) {
auto bigint = m.Ref(broker_).AsBigInt();
return jsgraph()->Int64Constant(
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index 499a326f20..900516c05d 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -56,6 +56,8 @@ class V8_EXPORT_PRIVATE BasicBlock final
};
BasicBlock(Zone* zone, Id id);
+ BasicBlock(const BasicBlock&) = delete;
+ BasicBlock& operator=(const BasicBlock&) = delete;
Id id() const { return id_; }
#if DEBUG
@@ -187,8 +189,6 @@ class V8_EXPORT_PRIVATE BasicBlock final
AssemblerDebugInfo debug_info_;
#endif
Id id_;
-
- DISALLOW_COPY_AND_ASSIGN(BasicBlock);
};
std::ostream& operator<<(std::ostream&, const BasicBlock&);
@@ -202,6 +202,8 @@ std::ostream& operator<<(std::ostream&, const BasicBlock::Id&);
class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit Schedule(Zone* zone, size_t node_count_hint = 0);
+ Schedule(const Schedule&) = delete;
+ Schedule& operator=(const Schedule&) = delete;
// Return the block which contains {node}, if any.
BasicBlock* block(Node* node) const;
@@ -307,8 +309,6 @@ class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) {
BasicBlockVector rpo_order_; // Reverse-post-order block list.
BasicBlock* start_;
BasicBlock* end_;
-
- DISALLOW_COPY_AND_ASSIGN(Schedule);
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, const Schedule&);
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index 83b88cd3bf..3e57da18a0 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -101,6 +101,7 @@ namespace compiler {
V(IncBlockCounter) \
V(ResumeGenerator) \
V(SuspendGenerator) \
+ V(ThrowIfNotSuperConstructor) \
V(ThrowSuperAlreadyCalledIfNotHole) \
V(ThrowSuperNotCalledIfHole) \
V(ToObject)
@@ -459,8 +460,14 @@ class SerializerForBackgroundCompilation {
bool honor_bailout_on_uninitialized);
void ProcessNamedPropertyAccess(Hints* receiver, NameRef const& name,
FeedbackSlot slot, AccessMode access_mode);
+ void ProcessNamedSuperPropertyAccess(Hints* receiver, NameRef const& name,
+ FeedbackSlot slot,
+ AccessMode access_mode);
void ProcessNamedAccess(Hints* receiver, NamedAccessFeedback const& feedback,
AccessMode access_mode, Hints* result_hints);
+ void ProcessNamedSuperAccess(Hints* receiver,
+ NamedAccessFeedback const& feedback,
+ AccessMode access_mode, Hints* result_hints);
void ProcessElementAccess(Hints const& receiver, Hints const& key,
ElementAccessFeedback const& feedback,
AccessMode access_mode);
@@ -494,7 +501,8 @@ class SerializerForBackgroundCompilation {
bool honor_bailout_on_uninitialized);
PropertyAccessInfo ProcessMapForNamedPropertyAccess(
- Hints* receiver, MapRef receiver_map, NameRef const& name,
+ Hints* receiver, base::Optional<MapRef> receiver_map,
+ MapRef lookup_start_object_map, NameRef const& name,
AccessMode access_mode, base::Optional<JSObjectRef> concrete_receiver,
Hints* result_hints);
@@ -1109,7 +1117,8 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
// OSR entry point. TODO(neis): Support OSR?
return false;
}
- if (FLAG_turboprop && feedback.slot_kind() == FeedbackSlotKind::kCall) {
+ if (broker()->is_turboprop() &&
+ feedback.slot_kind() == FeedbackSlotKind::kCall) {
return false;
}
if (feedback.IsInsufficient()) {
@@ -2083,7 +2092,7 @@ void SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct(
if (callee->IsJSBoundFunction()) {
JSBoundFunctionRef bound_function(broker(),
Handle<JSBoundFunction>::cast(callee));
- bound_function.Serialize();
+ if (!bound_function.Serialize()) return;
callee = UnrollBoundFunction(bound_function, broker(), arguments,
&expanded_arguments, zone())
.object();
@@ -2153,7 +2162,7 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
arguments->insert(arguments->begin(), result_hints_from_new_target);
}
- // For JSNativeContextSpecialization::InferReceiverRootMap
+ // For JSNativeContextSpecialization::InferRootMap
Hints new_accumulator_hints = result_hints_from_new_target.Copy(zone());
ProcessCallOrConstructRecursive(callee, new_target, *arguments,
@@ -2245,7 +2254,8 @@ void SerializerForBackgroundCompilation::ProcessApiCall(
Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver));
FunctionTemplateInfoRef target_template_info(
- broker(), handle(target->function_data(), broker()->isolate()));
+ broker(),
+ handle(target->function_data(kAcquireLoad), broker()->isolate()));
if (!target_template_info.has_call_code()) return;
target_template_info.SerializeCallCode();
@@ -2680,8 +2690,8 @@ void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest(
namespace {
void ProcessMapForFunctionBind(MapRef map) {
map.SerializePrototype();
- int min_nof_descriptors = i::Max(JSFunction::kLengthDescriptorIndex,
- JSFunction::kNameDescriptorIndex) +
+ int min_nof_descriptors = std::max({JSFunction::kLengthDescriptorIndex,
+ JSFunction::kNameDescriptorIndex}) +
1;
if (map.NumberOfOwnDescriptors() >= min_nof_descriptors) {
map.SerializeOwnDescriptor(
@@ -2960,18 +2970,20 @@ void SerializerForBackgroundCompilation::ProcessUnaryOrBinaryOperation(
PropertyAccessInfo
SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
- Hints* receiver, MapRef receiver_map, NameRef const& name,
- AccessMode access_mode, base::Optional<JSObjectRef> concrete_receiver,
- Hints* result_hints) {
- // For JSNativeContextSpecialization::InferReceiverRootMap
- receiver_map.SerializeRootMap();
+ Hints* receiver, base::Optional<MapRef> receiver_map,
+ MapRef lookup_start_object_map, NameRef const& name, AccessMode access_mode,
+ base::Optional<JSObjectRef> concrete_receiver, Hints* result_hints) {
+ DCHECK_IMPLIES(concrete_receiver.has_value(), receiver_map.has_value());
+
+ // For JSNativeContextSpecialization::InferRootMap
+ lookup_start_object_map.SerializeRootMap();
// For JSNativeContextSpecialization::ReduceNamedAccess.
JSGlobalProxyRef global_proxy =
broker()->target_native_context().global_proxy_object();
JSGlobalObjectRef global_object =
broker()->target_native_context().global_object();
- if (receiver_map.equals(global_proxy.map())) {
+ if (lookup_start_object_map.equals(global_proxy.map())) {
base::Optional<PropertyCellRef> cell = global_object.GetPropertyCell(
name, SerializationPolicy::kSerializeIfNeeded);
if (access_mode == AccessMode::kLoad && cell.has_value()) {
@@ -2980,7 +2992,7 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
}
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
- receiver_map, name, access_mode, dependencies(),
+ lookup_start_object_map, name, access_mode, dependencies(),
SerializationPolicy::kSerializeIfNeeded);
// For JSNativeContextSpecialization::InlinePropertySetterCall
@@ -2989,25 +3001,27 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
if (access_info.constant()->IsJSFunction()) {
JSFunctionRef function(broker(), access_info.constant());
- // For JSCallReducer and JSInlining(Heuristic).
- HintsVector arguments({Hints::SingleMap(receiver_map.object(), zone())},
- zone());
- // In the case of a setter any added result hints won't make sense, but
- // they will be ignored anyways by Process*PropertyAccess due to the
- // access mode not being kLoad.
- ProcessCalleeForCallOrConstruct(
- function.object(), base::nullopt, arguments,
- SpeculationMode::kDisallowSpeculation, kMissingArgumentsAreUndefined,
- result_hints);
-
- // For JSCallReducer::ReduceCallApiFunction.
- Handle<SharedFunctionInfo> sfi = function.shared().object();
- if (sfi->IsApiFunction()) {
- FunctionTemplateInfoRef fti_ref(
- broker(), handle(sfi->get_api_func_data(), broker()->isolate()));
- if (fti_ref.has_call_code()) {
- fti_ref.SerializeCallCode();
- ProcessReceiverMapForApiCall(fti_ref, receiver_map.object());
+ if (receiver_map.has_value()) {
+ // For JSCallReducer and JSInlining(Heuristic).
+ HintsVector arguments(
+ {Hints::SingleMap(receiver_map->object(), zone())}, zone());
+ // In the case of a setter any added result hints won't make sense, but
+ // they will be ignored anyway by Process*PropertyAccess because the
+ // access mode is not kLoad.
+ ProcessCalleeForCallOrConstruct(
+ function.object(), base::nullopt, arguments,
+ SpeculationMode::kDisallowSpeculation,
+ kMissingArgumentsAreUndefined, result_hints);
+
+ // For JSCallReducer::ReduceCallApiFunction.
+ Handle<SharedFunctionInfo> sfi = function.shared().object();
+ if (sfi->IsApiFunction()) {
+ FunctionTemplateInfoRef fti_ref(
+ broker(), handle(sfi->get_api_func_data(), broker()->isolate()));
+ if (fti_ref.has_call_code()) {
+ fti_ref.SerializeCallCode();
+ ProcessReceiverMapForApiCall(fti_ref, receiver_map->object());
+ }
}
}
} else if (access_info.constant()->IsJSBoundFunction()) {
@@ -3035,7 +3049,7 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
holder = JSObjectRef(broker(), prototype);
} else {
CHECK_IMPLIES(concrete_receiver.has_value(),
- concrete_receiver->map().equals(receiver_map));
+ concrete_receiver->map().equals(*receiver_map));
holder = concrete_receiver;
}
@@ -3149,6 +3163,38 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
}
}
+void SerializerForBackgroundCompilation::ProcessNamedSuperPropertyAccess(
+ Hints* receiver, NameRef const& name, FeedbackSlot slot,
+ AccessMode access_mode) {
+ if (slot.IsInvalid() || feedback_vector().is_null()) return;
+ FeedbackSource source(feedback_vector(), slot);
+ ProcessedFeedback const& feedback =
+ broker()->ProcessFeedbackForPropertyAccess(source, access_mode, name);
+ if (BailoutOnUninitialized(feedback)) return;
+
+ Hints new_accumulator_hints;
+ switch (feedback.kind()) {
+ case ProcessedFeedback::kNamedAccess:
+ DCHECK(name.equals(feedback.AsNamedAccess().name()));
+ ProcessNamedSuperAccess(receiver, feedback.AsNamedAccess(), access_mode,
+ &new_accumulator_hints);
+ break;
+ case ProcessedFeedback::kMinimorphicPropertyAccess:
+ DCHECK(name.equals(feedback.AsMinimorphicPropertyAccess().name()));
+ ProcessMinimorphicPropertyAccess(feedback.AsMinimorphicPropertyAccess(),
+ source);
+ break;
+ case ProcessedFeedback::kInsufficient:
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (access_mode == AccessMode::kLoad) {
+ environment()->accumulator_hints() = new_accumulator_hints;
+ }
+}
+
void SerializerForBackgroundCompilation::ProcessNamedAccess(
Hints* receiver, NamedAccessFeedback const& feedback,
AccessMode access_mode, Hints* result_hints) {
@@ -3162,17 +3208,18 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess(
for (Handle<Map> map :
GetRelevantReceiverMaps(broker()->isolate(), receiver->maps())) {
MapRef map_ref(broker(), map);
- ProcessMapForNamedPropertyAccess(receiver, map_ref, feedback.name(),
- access_mode, base::nullopt, result_hints);
+ ProcessMapForNamedPropertyAccess(receiver, map_ref, map_ref,
+ feedback.name(), access_mode,
+ base::nullopt, result_hints);
}
for (Handle<Object> hint : receiver->constants()) {
ObjectRef object(broker(), hint);
if (access_mode == AccessMode::kLoad && object.IsJSObject()) {
MapRef map_ref = object.AsJSObject().map();
- ProcessMapForNamedPropertyAccess(receiver, map_ref, feedback.name(),
- access_mode, object.AsJSObject(),
- result_hints);
+ ProcessMapForNamedPropertyAccess(receiver, map_ref, map_ref,
+ feedback.name(), access_mode,
+ object.AsJSObject(), result_hints);
}
// For JSNativeContextSpecialization::ReduceJSLoadNamed.
if (access_mode == AccessMode::kLoad && object.IsJSFunction() &&
@@ -3190,6 +3237,30 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess(
}
}
+void SerializerForBackgroundCompilation::ProcessNamedSuperAccess(
+ Hints* receiver, NamedAccessFeedback const& feedback,
+ AccessMode access_mode, Hints* result_hints) {
+ MapHandles receiver_maps =
+ GetRelevantReceiverMaps(broker()->isolate(), receiver->maps());
+ for (Handle<Map> receiver_map : receiver_maps) {
+ MapRef receiver_map_ref(broker(), receiver_map);
+ for (Handle<Map> feedback_map : feedback.maps()) {
+ MapRef feedback_map_ref(broker(), feedback_map);
+ ProcessMapForNamedPropertyAccess(
+ receiver, receiver_map_ref, feedback_map_ref, feedback.name(),
+ access_mode, base::nullopt, result_hints);
+ }
+ }
+ if (receiver_maps.empty()) {
+ for (Handle<Map> feedback_map : feedback.maps()) {
+ MapRef feedback_map_ref(broker(), feedback_map);
+ ProcessMapForNamedPropertyAccess(
+ receiver, base::nullopt, feedback_map_ref, feedback.name(),
+ access_mode, base::nullopt, result_hints);
+ }
+ }
+}
+
void SerializerForBackgroundCompilation::ProcessElementAccess(
Hints const& receiver, Hints const& key,
ElementAccessFeedback const& feedback, AccessMode access_mode) {
@@ -3214,7 +3285,7 @@ void SerializerForBackgroundCompilation::ProcessElementAccess(
for (Handle<Object> hint : receiver.constants()) {
ObjectRef receiver_ref(broker(), hint);
- // For JSNativeContextSpecialization::InferReceiverRootMap
+ // For JSNativeContextSpecialization::InferRootMap
if (receiver_ref.IsHeapObject()) {
receiver_ref.AsHeapObject().map().SerializeRootMap();
}
@@ -3245,7 +3316,7 @@ void SerializerForBackgroundCompilation::ProcessElementAccess(
}
}
- // For JSNativeContextSpecialization::InferReceiverRootMap
+ // For JSNativeContextSpecialization::InferRootMap
for (Handle<Map> map : receiver.maps()) {
MapRef map_ref(broker(), map);
map_ref.SerializeRootMap();
@@ -3263,9 +3334,11 @@ void SerializerForBackgroundCompilation::VisitLdaNamedProperty(
void SerializerForBackgroundCompilation::VisitLdaNamedPropertyFromSuper(
BytecodeArrayIterator* iterator) {
- NameRef(broker(),
- iterator->GetConstantForIndexOperand(1, broker()->isolate()));
- // TODO(marja, v8:9237): Process feedback once it's added to the byte code.
+ Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
+ NameRef name(broker(),
+ iterator->GetConstantForIndexOperand(1, broker()->isolate()));
+ FeedbackSlot slot = iterator->GetSlotOperand(2);
+ ProcessNamedSuperPropertyAccess(receiver, name, slot, AccessMode::kLoad);
}
// TODO(neis): Do feedback-independent serialization also for *NoFeedback
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index f832107939..36d590b1aa 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -141,6 +141,7 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4ShrU) \
V(I32x4MinU) \
V(I32x4MaxU) \
+ V(I32x4DotI16x8S) \
V(I32x4Eq) \
V(I32x4Ne) \
V(I32x4LtS) \
@@ -158,6 +159,8 @@ void SimdScalarLowering::LowerGraph() {
V(S128Or) \
V(S128Xor) \
V(S128Not) \
+ V(S128AndNot) \
+ V(S128Select) \
V(V32x4AnyTrue) \
V(V32x4AllTrue) \
V(V16x8AnyTrue) \
@@ -178,7 +181,13 @@ void SimdScalarLowering::LowerGraph() {
V(F64x2Mul) \
V(F64x2Div) \
V(F64x2Min) \
- V(F64x2Max)
+ V(F64x2Max) \
+ V(F64x2Pmin) \
+ V(F64x2Pmax) \
+ V(F64x2Ceil) \
+ V(F64x2Floor) \
+ V(F64x2Trunc) \
+ V(F64x2NearestInt)
#define FOREACH_FLOAT32X4_OPCODE(V) \
V(F32x4Splat) \
@@ -197,7 +206,13 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4Mul) \
V(F32x4Div) \
V(F32x4Min) \
- V(F32x4Max)
+ V(F32x4Max) \
+ V(F32x4Pmin) \
+ V(F32x4Pmax) \
+ V(F32x4Ceil) \
+ V(F32x4Floor) \
+ V(F32x4Trunc) \
+ V(F32x4NearestInt)
#define FOREACH_FLOAT64x2_TO_INT64x2OPCODE(V) \
V(F64x2Eq) \
@@ -225,10 +240,10 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8ShrS) \
V(I16x8SConvertI32x4) \
V(I16x8Add) \
- V(I16x8AddSaturateS) \
+ V(I16x8AddSatS) \
V(I16x8AddHoriz) \
V(I16x8Sub) \
- V(I16x8SubSaturateS) \
+ V(I16x8SubSatS) \
V(I16x8Mul) \
V(I16x8MinS) \
V(I16x8MaxS) \
@@ -236,8 +251,8 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8UConvertI8x16High) \
V(I16x8ShrU) \
V(I16x8UConvertI32x4) \
- V(I16x8AddSaturateU) \
- V(I16x8SubSaturateU) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
V(I16x8MinU) \
V(I16x8MaxU) \
V(I16x8Eq) \
@@ -264,16 +279,16 @@ void SimdScalarLowering::LowerGraph() {
V(I8x16Shl) \
V(I8x16ShrS) \
V(I8x16Add) \
- V(I8x16AddSaturateS) \
+ V(I8x16AddSatS) \
V(I8x16Sub) \
- V(I8x16SubSaturateS) \
+ V(I8x16SubSatS) \
V(I8x16Mul) \
V(I8x16MinS) \
V(I8x16MaxS) \
V(I8x16ShrU) \
V(I8x16UConvertI16x8) \
- V(I8x16AddSaturateU) \
- V(I8x16SubSaturateU) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
V(I8x16MinU) \
V(I8x16MaxU) \
V(I8x16Eq) \
@@ -324,6 +339,7 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
FOREACH_INT32X4_OPCODE(CASE_STMT)
case IrOpcode::kReturn:
case IrOpcode::kParameter:
+ case IrOpcode::kPhi:
case IrOpcode::kCall: {
replacements_[node->id()].type = SimdType::kInt32x4;
break;
@@ -351,28 +367,24 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
case IrOpcode::kLoadTransform: {
LoadTransformParameters params = LoadTransformParametersOf(node->op());
switch (params.transformation) {
- case LoadTransformation::kS8x16LoadSplat:
+ case LoadTransformation::kS128Load8Splat:
replacements_[node->id()].type = SimdType::kInt8x16;
break;
- case LoadTransformation::kS16x8LoadSplat:
+ case LoadTransformation::kS128Load16Splat:
+ case LoadTransformation::kS128Load8x8S:
+ case LoadTransformation::kS128Load8x8U:
replacements_[node->id()].type = SimdType::kInt16x8;
break;
- case LoadTransformation::kS32x4LoadSplat:
+ case LoadTransformation::kS128Load32Splat:
+ case LoadTransformation::kS128Load16x4S:
+ case LoadTransformation::kS128Load16x4U:
+ case LoadTransformation::kS128Load32Zero:
replacements_[node->id()].type = SimdType::kInt32x4;
break;
- case LoadTransformation::kS64x2LoadSplat:
- replacements_[node->id()].type = SimdType::kInt64x2;
- break;
- case LoadTransformation::kI16x8Load8x8S:
- case LoadTransformation::kI16x8Load8x8U:
- replacements_[node->id()].type = SimdType::kInt16x8;
- break;
- case LoadTransformation::kI32x4Load16x4S:
- case LoadTransformation::kI32x4Load16x4U:
- replacements_[node->id()].type = SimdType::kInt32x4;
- break;
- case LoadTransformation::kI64x2Load32x2S:
- case LoadTransformation::kI64x2Load32x2U:
+ case LoadTransformation::kS128Load64Splat:
+ case LoadTransformation::kS128Load32x2S:
+ case LoadTransformation::kS128Load32x2U:
+ case LoadTransformation::kS128Load64Zero:
replacements_[node->id()].type = SimdType::kInt64x2;
break;
default:
@@ -499,11 +511,20 @@ void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices,
int num_lanes = NumLanes(type);
int lane_width = kSimd128Size / num_lanes;
int laneIndex = kLaneOffsets[0] / lane_width;
- new_indices[laneIndex] = index;
+
+ Node* rep = index;
+
+ if (HasReplacement(0, index)) {
+ // Index nodes are lowered to scalar nodes.
+ DCHECK_EQ(1, ReplacementCount(index));
+ rep = GetReplacements(index)[0];
+ }
+
+ new_indices[laneIndex] = rep;
for (int i = 1; i < num_lanes; ++i) {
laneIndex = kLaneOffsets[i * lane_width] / lane_width;
new_indices[laneIndex] = graph()->NewNode(
- machine()->Int32Add(), index,
+ machine()->Int32Add(), rep,
graph()->NewNode(
common()->Int32Constant(static_cast<int>(i) * lane_width)));
}
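For reference, the index computation above just adds a constant per-lane byte offset to the (scalar) base index. A minimal stand-alone sketch of that arithmetic, with invented names and a simple in-order lane layout assumed (the real code consults kLaneOffsets to handle endianness):

#include <cstdio>

// Byte offset of each scalar lane of a 128-bit value, given the lane width in
// bytes (4 for i32x4, 2 for i16x8, 1 for i8x16). Mirrors the chain of
// Int32Add(index, i * lane_width) nodes built above.
void LaneOffsets(int base_index, int lane_width, int* out, int num_lanes) {
  for (int i = 0; i < num_lanes; ++i) {
    out[i] = base_index + i * lane_width;
  }
}

int main() {
  int offsets[4];
  LaneOffsets(/*base_index=*/16, /*lane_width=*/4, offsets, 4);
  for (int i = 0; i < 4; ++i) printf("lane %d -> byte %d\n", i, offsets[i]);
  return 0;
}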
@@ -563,53 +584,53 @@ void SimdScalarLowering::LowerLoadTransformOp(Node* node, SimdType type) {
// Load extends have a different machine type for loading.
switch (params.transformation) {
- case LoadTransformation::kI16x8Load8x8S:
+ case LoadTransformation::kS128Load8x8S:
load_rep = MachineType::Int8();
load_type = SimdType::kInt8x16;
break;
- case LoadTransformation::kI16x8Load8x8U:
+ case LoadTransformation::kS128Load8x8U:
load_rep = MachineType::Uint8();
load_type = SimdType::kInt8x16;
break;
- case LoadTransformation::kI32x4Load16x4S:
+ case LoadTransformation::kS128Load16x4S:
load_rep = MachineType::Int16();
load_type = SimdType::kInt16x8;
break;
- case LoadTransformation::kI32x4Load16x4U:
+ case LoadTransformation::kS128Load16x4U:
load_rep = MachineType::Uint16();
load_type = SimdType::kInt16x8;
break;
- case LoadTransformation::kI64x2Load32x2S:
+ case LoadTransformation::kS128Load32x2S:
load_rep = MachineType::Int32();
load_type = SimdType::kInt32x4;
break;
- case LoadTransformation::kI64x2Load32x2U:
+ case LoadTransformation::kS128Load32x2U:
load_rep = MachineType::Uint32();
load_type = SimdType::kInt32x4;
break;
- case LoadTransformation::kS8x16LoadSplat:
- case LoadTransformation::kS16x8LoadSplat:
- case LoadTransformation::kS32x4LoadSplat:
- case LoadTransformation::kS64x2LoadSplat:
+ case LoadTransformation::kS128Load8Splat:
+ case LoadTransformation::kS128Load16Splat:
+ case LoadTransformation::kS128Load32Splat:
+ case LoadTransformation::kS128Load64Splat:
+ case LoadTransformation::kS128Load32Zero:
+ case LoadTransformation::kS128Load64Zero:
load_rep = MachineTypeFrom(type);
break;
default:
- // Lowering for s64x2 is not implemented since lowering for 64x2
- // operations doesn't work properly yet.
- UNIMPLEMENTED();
+ UNREACHABLE();
}
DCHECK_NE(load_rep, MachineType::None());
const Operator* load_op;
switch (params.kind) {
- case LoadKind::kNormal:
+ case MemoryAccessKind::kNormal:
load_op = machine()->Load(load_rep);
break;
- case LoadKind::kUnaligned:
+ case MemoryAccessKind::kUnaligned:
load_op = machine()->UnalignedLoad(load_rep);
break;
- case LoadKind::kProtected:
+ case MemoryAccessKind::kProtected:
load_op = machine()->ProtectedLoad(load_rep);
break;
}
@@ -617,39 +638,71 @@ void SimdScalarLowering::LowerLoadTransformOp(Node* node, SimdType type) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
int num_lanes = NumLanes(type);
- Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
+ Node** reps = zone()->NewArray<Node*>(num_lanes);
Node* effect_input = node->InputAt(2);
Node* control_input = node->InputAt(3);
+ // This node is also used as an effect input by other nodes, so we need to
+ // change this node in place.
+ reps[0] = node;
+ NodeProperties::ChangeOp(reps[0], load_op);
+
if (type != load_type) {
// We load a smaller lane size, then extend to a larger lane size. So use
// the smaller lane size to calculate the index nodes for loads, but only
// actually load half of those lanes.
Node** indices = zone()->NewArray<Node*>(num_lanes * 2);
GetIndexNodes(index, indices, load_type);
- for (int i = num_lanes - 1; i >= 0; --i) {
- rep_nodes[i] = graph()->NewNode(load_op, base, indices[i], effect_input,
- control_input);
- effect_input = rep_nodes[i];
- // Load operations are Word32 nodes, change them to Word64.
- if (params.transformation == LoadTransformation::kI64x2Load32x2S) {
- rep_nodes[i] =
- graph()->NewNode(machine()->ChangeInt32ToInt64(), rep_nodes[i]);
- } else if (params.transformation == LoadTransformation::kI64x2Load32x2U) {
- rep_nodes[i] =
- graph()->NewNode(machine()->ChangeUint32ToUint64(), rep_nodes[i]);
- }
+ reps[0]->ReplaceInput(1, indices[0]);
+
+ for (int i = num_lanes - 1; i > 0; --i) {
+ reps[i] = graph()->NewNode(load_op, base, indices[i], effect_input,
+ control_input);
+ effect_input = reps[i];
}
} else {
- // Load splat, load from the same index for every lane.
- for (int i = num_lanes - 1; i >= 0; --i) {
- rep_nodes[i] =
- graph()->NewNode(load_op, base, index, effect_input, control_input);
- effect_input = rep_nodes[i];
+ if (params.transformation == LoadTransformation::kS128Load32Zero) {
+ for (int i = num_lanes - 1; i > 0; --i) {
+ reps[i] = mcgraph_->Int32Constant(0);
+ }
+ } else if (params.transformation == LoadTransformation::kS128Load64Zero) {
+ for (int i = num_lanes - 1; i > 0; --i) {
+ reps[i] = mcgraph_->Int64Constant(0);
+ }
+ } else {
+ // Load splat: load from the same index for every lane.
+ Node* rep = HasReplacement(0, index) ? GetReplacements(index)[0] : index;
+
+ // Replace the first node's index input; we only called ChangeOp above.
+ reps[0]->ReplaceInput(1, rep);
+ for (int i = num_lanes - 1; i > 0; --i) {
+ reps[i] =
+ graph()->NewNode(load_op, base, rep, effect_input, control_input);
+ effect_input = reps[i];
+ }
+ }
+ }
+
+ // Update the effect input, completing the effect chain, but only if there is
+ // an effect output (for LoadZero, reps[1] is a zero constant with none).
+ if (reps[1]->op()->EffectOutputCount() > 0) {
+ reps[0]->ReplaceInput(2, reps[1]);
+ }
+
+ // Special case: the load nodes need to be sign-extended, and we do it here
+ // so that the loop above can connect all the effect edges correctly.
+ if (params.transformation == LoadTransformation::kS128Load32x2S) {
+ for (int i = 0; i < num_lanes; ++i) {
+ reps[i] = graph()->NewNode(machine()->ChangeInt32ToInt64(), reps[i]);
+ }
+ } else if (params.transformation == LoadTransformation::kS128Load32x2U) {
+ for (int i = 0; i < num_lanes; ++i) {
+ reps[i] = graph()->NewNode(machine()->ChangeUint32ToUint64(), reps[i]);
}
}
- ReplaceNode(node, rep_nodes, num_lanes);
+
+ ReplaceNode(node, reps, num_lanes);
}
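As a rough scalar picture of what this lowering produces (a sketch only, not V8 code; the names are invented), an S128Load32Zero becomes one 32-bit load plus three constant-zero lanes, and an S128Load32x2S becomes two 32-bit loads whose results are sign-extended to 64 bits:

#include <cstdint>
#include <cstring>

// Scalar emulation of two of the load transforms handled above.
void S128Load32Zero(const uint8_t* mem, int32_t lanes[4]) {
  std::memcpy(&lanes[0], mem, sizeof(int32_t));  // the single real load
  lanes[1] = lanes[2] = lanes[3] = 0;            // remaining lanes are zero
}

void S128Load32x2S(const uint8_t* mem, int64_t lanes[2]) {
  for (int i = 0; i < 2; ++i) {
    int32_t half;
    std::memcpy(&half, mem + i * sizeof(int32_t), sizeof(int32_t));
    lanes[i] = static_cast<int64_t>(half);  // ChangeInt32ToInt64 (sign-extend)
  }
}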
void SimdScalarLowering::LowerStoreOp(Node* node) {
@@ -947,8 +1000,9 @@ void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
for (int i = 0; i < kNumLanes32; ++i) {
Node* double_rep =
graph()->NewNode(machine()->ChangeFloat32ToFloat64(), rep[i]);
- Diamond nan_d(graph(), common(), graph()->NewNode(machine()->Float64Equal(),
- double_rep, double_rep));
+ Diamond nan_d(
+ graph(), common(),
+ graph()->NewNode(machine()->Float64Equal(), double_rep, double_rep));
Node* temp =
nan_d.Phi(MachineRepresentation::kFloat64, double_rep, double_zero);
Diamond min_d(graph(), common(),
@@ -1003,25 +1057,28 @@ void SimdScalarLowering::LowerPack(Node* node, SimdType input_rep_type,
const Operator* less_op = machine()->Int32LessThan();
Node* min = nullptr;
Node* max = nullptr;
+ const Operator* sign_extend;
MachineRepresentation phi_rep;
if (output_rep_type == SimdType::kInt16x8) {
+ sign_extend = machine()->SignExtendWord16ToInt32();
DCHECK(input_rep_type == SimdType::kInt32x4);
if (is_signed) {
min = mcgraph_->Int32Constant(std::numeric_limits<int16_t>::min());
max = mcgraph_->Int32Constant(std::numeric_limits<int16_t>::max());
} else {
- min = mcgraph_->Int32Constant(std::numeric_limits<uint16_t>::min());
+ min = mcgraph_->Uint32Constant(std::numeric_limits<uint16_t>::min());
max = mcgraph_->Uint32Constant(std::numeric_limits<uint16_t>::max());
}
phi_rep = MachineRepresentation::kWord16;
} else {
+ sign_extend = machine()->SignExtendWord8ToInt32();
DCHECK(output_rep_type == SimdType::kInt8x16 &&
input_rep_type == SimdType::kInt16x8);
if (is_signed) {
min = mcgraph_->Int32Constant(std::numeric_limits<int8_t>::min());
max = mcgraph_->Int32Constant(std::numeric_limits<int8_t>::max());
} else {
- min = mcgraph_->Int32Constant(std::numeric_limits<uint8_t>::min());
+ min = mcgraph_->Uint32Constant(std::numeric_limits<uint8_t>::min());
max = mcgraph_->Uint32Constant(std::numeric_limits<uint8_t>::max());
}
phi_rep = MachineRepresentation::kWord8;
@@ -1037,14 +1094,24 @@ void SimdScalarLowering::LowerPack(Node* node, SimdType input_rep_type,
Diamond d_min(graph(), common(), graph()->NewNode(less_op, input, min));
input = d_min.Phi(phi_rep, min, input);
Diamond d_max(graph(), common(), graph()->NewNode(less_op, max, input));
- rep_node[i] = d_max.Phi(phi_rep, max, input);
+ // We keep nodes in sign-extended form. E.g. for uint8_t, we need to
+ // compare with 0x000000ff (saturated narrowing), but the result of the
+ // conversion should be 0xffffffff to work well with the rest of the lowering.
+ rep_node[i] = graph()->NewNode(sign_extend, d_max.Phi(phi_rep, max, input));
}
ReplaceNode(node, rep_node, num_lanes);
}
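To make the sign-extension convention above concrete, here is a small stand-alone sketch (plain C++, invented names) of how one lane of an unsigned 8-bit saturating narrow is clamped and then stored in sign-extended Word32 form:

#include <cstdint>
#include <cstdio>

// Clamp to the uint8_t range, then keep the lane sign-extended, matching the
// SignExtendWord8ToInt32 applied after the clamping diamonds above.
int32_t PackLaneU8(int32_t input) {
  if (input < 0) input = 0;
  if (input > 0xff) input = 0xff;
  return static_cast<int32_t>(static_cast<int8_t>(input));  // sign-extend
}

int main() {
  // 300 clamps to 0xff and is stored as 0xffffffff; 5 stays 0x00000005.
  printf("0x%08x\n", static_cast<uint32_t>(PackLaneU8(300)));
  printf("0x%08x\n", static_cast<uint32_t>(PackLaneU8(5)));
  return 0;
}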
void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
DCHECK_EQ(2, node->InputCount());
- Node* shift_node = Mask(node->InputAt(1), GetMaskForShift(node));
+
+ // The shift node, if it has a replacement, should be a single scalar.
+ DCHECK_GE(1, ReplacementCount(node->InputAt(1)));
+ Node* val = (HasReplacement(0, node->InputAt(1)))
+ ? GetReplacements(node->InputAt(1))[0]
+ : node->InputAt(1);
+
+ Node* shift_node = Mask(val, GetMaskForShift(node));
Node** rep = GetReplacementsWithType(node->InputAt(0), type);
int num_lanes = NumLanes(type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
@@ -1191,8 +1258,22 @@ void SimdScalarLowering::LowerAllTrueOp(Node* node, SimdType rep_type) {
tmp_result = d.Phi(MachineRepresentation::kWord32, zero, tmp_result);
}
rep_node[0] = tmp_result;
- for (int i = 1; i < num_lanes; ++i) {
- rep_node[i] = nullptr;
+ ReplaceNode(node, rep_node, 1);
+}
+
+void SimdScalarLowering::LowerFloatPseudoMinMax(Node* node, const Operator* op,
+ bool is_max, SimdType type) {
+ DCHECK_EQ(2, node->InputCount());
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), type);
+ int num_lanes = NumLanes(type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ MachineRepresentation rep = MachineTypeFrom(type).representation();
+ for (int i = 0; i < num_lanes; ++i) {
+ Node* cmp = is_max ? graph()->NewNode(op, rep_left[i], rep_right[i])
+ : graph()->NewNode(op, rep_right[i], rep_left[i]);
+ Diamond d(graph(), common(), cmp);
+ rep_node[i] = d.Phi(rep, rep_right[i], rep_left[i]);
}
ReplaceNode(node, rep_node, num_lanes);
}
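The helper above implements the Wasm pseudo-minimum/maximum, which is defined purely by a compare-and-select rather than by IEEE min/max. A small scalar sketch of the same selection (plain C++; the names are ours), showing how it treats NaN and -0.0 differently from std::fmin/fmax:

#include <cmath>
#include <cstdio>

// pmin(a, b) selects b when b < a, otherwise a; pmax mirrors the comparison.
// A NaN operand makes the comparison false, so the left operand wins.
double Pmin(double a, double b) { return b < a ? b : a; }
double Pmax(double a, double b) { return a < b ? b : a; }

int main() {
  printf("pmin(0.0, -0.0) = %g\n", Pmin(0.0, -0.0));  // 0 (left operand kept)
  printf("pmin(1.0, NaN)  = %g\n", Pmin(1.0, NAN));   // 1
  printf("pmax(NaN, 1.0)  = %g\n", Pmax(NAN, 1.0));   // nan
  return 0;
}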
@@ -1215,9 +1296,8 @@ void SimdScalarLowering::LowerNode(Node* node) {
case SimdType::kInt8x16: {
for (int i = 0; i < num_lanes; ++i) {
Address data_address = reinterpret_cast<Address>(params.data() + i);
- rep_node[i] = mcgraph_->Int32Constant(
- static_cast<int32_t>(static_cast<int8_t>(
- base::ReadLittleEndianValue<int8_t>(data_address))));
+ rep_node[i] = mcgraph_->Int32Constant(static_cast<int32_t>(
+ base::ReadLittleEndianValue<int8_t>(data_address)));
}
break;
}
@@ -1402,7 +1482,11 @@ void SimdScalarLowering::LowerNode(Node* node) {
// arguments need to be converted to i32x4 as well.
for (int i = NodeProperties::PastValueIndex(node) - 1; i >= 0; i--) {
Node* input = node->InputAt(i);
- if (HasReplacement(0, input)) {
+ if (ReplacementCount(input) == 1) {
+ // Special case for extract lanes
+ Node** reps = GetReplacements(input);
+ ReplaceNode(input, reps, 1);
+ } else if (HasReplacement(0, input)) {
Node** reps = GetReplacementsWithType(input, SimdType::kInt32x4);
ReplaceNode(input, reps, NumLanes(SimdType::kInt32x4));
}
@@ -1527,23 +1611,23 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Mul());
break;
}
- case IrOpcode::kI16x8AddSaturateS:
- case IrOpcode::kI8x16AddSaturateS: {
+ case IrOpcode::kI16x8AddSatS:
+ case IrOpcode::kI8x16AddSatS: {
LowerSaturateBinaryOp(node, rep_type, machine()->Int32Add(), true);
break;
}
- case IrOpcode::kI16x8SubSaturateS:
- case IrOpcode::kI8x16SubSaturateS: {
+ case IrOpcode::kI16x8SubSatS:
+ case IrOpcode::kI8x16SubSatS: {
LowerSaturateBinaryOp(node, rep_type, machine()->Int32Sub(), true);
break;
}
- case IrOpcode::kI16x8AddSaturateU:
- case IrOpcode::kI8x16AddSaturateU: {
+ case IrOpcode::kI16x8AddSatU:
+ case IrOpcode::kI8x16AddSatU: {
LowerSaturateBinaryOp(node, rep_type, machine()->Int32Add(), false);
break;
}
- case IrOpcode::kI16x8SubSaturateU:
- case IrOpcode::kI8x16SubSaturateU: {
+ case IrOpcode::kI16x8SubSatU:
+ case IrOpcode::kI8x16SubSatU: {
LowerSaturateBinaryOp(node, rep_type, machine()->Int32Sub(), false);
break;
}
@@ -1571,6 +1655,25 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerIntMinMax(node, machine()->Uint32LessThan(), false, rep_type);
break;
}
+ case IrOpcode::kI32x4DotI16x8S: {
+ // i32x4.dot_i16x8_s takes i16x8 inputs but produces an i32x4 output.
+ DCHECK_EQ(2, node->InputCount());
+ Node** rep_left =
+ GetReplacementsWithType(node->InputAt(0), SimdType::kInt16x8);
+ Node** rep_right =
+ GetReplacementsWithType(node->InputAt(1), SimdType::kInt16x8);
+ int num_lanes = NumLanes(rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
+ Node* lo = graph()->NewNode(machine()->Int32Mul(), rep_left[i * 2],
+ rep_right[i * 2]);
+ Node* hi = graph()->NewNode(machine()->Int32Mul(), rep_left[i * 2 + 1],
+ rep_right[i * 2 + 1]);
+ rep_node[i] = graph()->NewNode(machine()->Int32Add(), lo, hi);
+ }
+ ReplaceNode(node, rep_node, num_lanes);
+ break;
+ }
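The expansion above is the standard pairwise dot product. The sketch below (plain C++, invented names) shows the same arithmetic on concrete lanes, with each 16x16 product computed in 32 bits and the pair summed with wrapping semantics like the machine-level Int32Add:

#include <cstdint>

// i32x4.dot_i16x8_s: out[i] = a[2*i] * b[2*i] + a[2*i+1] * b[2*i+1].
// Each 16x16 product fits in 32 bits; the sum of the pair wraps modulo 2^32.
void DotI16x8S(const int16_t a[8], const int16_t b[8], int32_t out[4]) {
  for (int i = 0; i < 4; ++i) {
    uint32_t lo = static_cast<uint32_t>(a[2 * i] * b[2 * i]);
    uint32_t hi = static_cast<uint32_t>(a[2 * i + 1] * b[2 * i + 1]);
    out[i] = static_cast<int32_t>(lo + hi);  // wraps like Int32Add
  }
}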
case IrOpcode::kI64x2Neg: {
DCHECK_EQ(1, node->InputCount());
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
@@ -1756,6 +1859,16 @@ void SimdScalarLowering::LowerNode(Node* node) {
F32X4_BINOP_CASE(Div)
F32X4_BINOP_CASE(Min)
F32X4_BINOP_CASE(Max)
+ case IrOpcode::kF32x4Pmin: {
+ LowerFloatPseudoMinMax(node, machine()->Float32LessThan(), false,
+ rep_type);
+ break;
+ }
+ case IrOpcode::kF32x4Pmax: {
+ LowerFloatPseudoMinMax(node, machine()->Float32LessThan(), true,
+ rep_type);
+ break;
+ }
#undef F32X4_BINOP_CASE
#define F32X4_UNOP_CASE(name) \
case IrOpcode::kF32x4##name: { \
@@ -1766,6 +1879,22 @@ void SimdScalarLowering::LowerNode(Node* node) {
F32X4_UNOP_CASE(Neg)
F32X4_UNOP_CASE(Sqrt)
#undef F32X4_UNOP_CASE
+ case IrOpcode::kF32x4Ceil: {
+ LowerUnaryOp(node, rep_type, machine()->Float32RoundUp().op());
+ break;
+ }
+ case IrOpcode::kF32x4Floor: {
+ LowerUnaryOp(node, rep_type, machine()->Float32RoundDown().op());
+ break;
+ }
+ case IrOpcode::kF32x4Trunc: {
+ LowerUnaryOp(node, rep_type, machine()->Float32RoundTruncate().op());
+ break;
+ }
+ case IrOpcode::kF32x4NearestInt: {
+ LowerUnaryOp(node, rep_type, machine()->Float32RoundTiesEven().op());
+ break;
+ }
case IrOpcode::kF32x4RecipApprox:
case IrOpcode::kF32x4RecipSqrtApprox: {
DCHECK_EQ(1, node->InputCount());
@@ -1826,6 +1955,32 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerBinaryOp(node, rep_type, machine()->Float64Max());
break;
}
+ case IrOpcode::kF64x2Pmin: {
+ LowerFloatPseudoMinMax(node, machine()->Float64LessThan(), false,
+ rep_type);
+ break;
+ }
+ case IrOpcode::kF64x2Pmax: {
+ LowerFloatPseudoMinMax(node, machine()->Float64LessThan(), true,
+ rep_type);
+ break;
+ }
+ case IrOpcode::kF64x2Ceil: {
+ LowerUnaryOp(node, rep_type, machine()->Float64RoundUp().op());
+ break;
+ }
+ case IrOpcode::kF64x2Floor: {
+ LowerUnaryOp(node, rep_type, machine()->Float64RoundDown().op());
+ break;
+ }
+ case IrOpcode::kF64x2Trunc: {
+ LowerUnaryOp(node, rep_type, machine()->Float64RoundTruncate().op());
+ break;
+ }
+ case IrOpcode::kF64x2NearestInt: {
+ LowerUnaryOp(node, rep_type, machine()->Float64RoundTiesEven().op());
+ break;
+ }
case IrOpcode::kF64x2Splat:
case IrOpcode::kF32x4Splat:
case IrOpcode::kI64x2Splat:
@@ -1862,13 +2017,17 @@ void SimdScalarLowering::LowerNode(Node* node) {
case IrOpcode::kI8x16ExtractLaneU:
case IrOpcode::kI8x16ExtractLaneS: {
int32_t lane = OpParameter<int32_t>(node->op());
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ Node** rep_node = zone()->NewArray<Node*>(1);
rep_node[0] = GetReplacementsWithType(node->InputAt(0), rep_type)[lane];
- for (int i = 1; i < num_lanes; ++i) {
- rep_node[i] = nullptr;
+
+ // If unsigned, mask the top bits.
+ if (node->opcode() == IrOpcode::kI16x8ExtractLaneU) {
+ rep_node[0] = Mask(rep_node[0], kMask16);
+ } else if (node->opcode() == IrOpcode::kI8x16ExtractLaneU) {
+ rep_node[0] = Mask(rep_node[0], kMask8);
}
- ReplaceNode(node, rep_node, num_lanes);
+ ReplaceNode(node, rep_node, 1);
break;
}
case IrOpcode::kF64x2ReplaceLane:
@@ -1890,6 +2049,17 @@ void SimdScalarLowering::LowerNode(Node* node) {
} else {
rep_node[lane] = repNode;
}
+
+ // The replacement nodes for these opcodes are in Word32, and we always
+ // store nodes in sign-extended form (and mask them to account for overflows).
+ if (node->opcode() == IrOpcode::kI16x8ReplaceLane) {
+ rep_node[lane] = graph()->NewNode(machine()->SignExtendWord16ToInt32(),
+ Mask(rep_node[lane], kMask16));
+ } else if (node->opcode() == IrOpcode::kI8x16ReplaceLane) {
+ rep_node[lane] = graph()->NewNode(machine()->SignExtendWord8ToInt32(),
+ Mask(rep_node[lane], kMask8));
+ }
+
ReplaceNode(node, rep_node, num_lanes);
break;
}
@@ -2035,7 +2205,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
// but we still need GetReplacementsWithType if input is float.
DCHECK_EQ(ReplacementType(node), SimdType::kInt32x4);
Node** reps = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ Node** rep_node = zone()->NewArray<Node*>(1);
Node* true_node = mcgraph_->Int32Constant(1);
Node* zero = mcgraph_->Int32Constant(0);
Node* tmp_result = zero;
@@ -2046,10 +2216,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
d.Phi(MachineRepresentation::kWord32, tmp_result, true_node);
}
rep_node[0] = tmp_result;
- for (int i = 1; i < num_lanes; ++i) {
- rep_node[i] = nullptr;
- }
- ReplaceNode(node, rep_node, num_lanes);
+ ReplaceNode(node, rep_node, 1);
break;
}
case IrOpcode::kV32x4AllTrue: {
@@ -2103,7 +2270,9 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
- default: { DefaultLowering(node); }
+ default: {
+ DefaultLowering(node);
+ }
}
}
@@ -2237,7 +2406,7 @@ void SimdScalarLowering::Int32ToSmallerInt(Node** replacements, Node** result) {
for (int j = 0; j < num_ints; j++) {
result[num_ints * i + j] = graph()->NewNode(
sign_extend,
- graph()->NewNode(machine()->Word32Sar(), replacements[i],
+ graph()->NewNode(machine()->Word32Shr(), replacements[i],
mcgraph_->Int32Constant(j * bit_size)));
}
} else {
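The switch to a logical shift above pairs with the subsequent sign-extension of the low bits: lane j of a packed Word32 is isolated with an unsigned shift and then sign-extended from its own width. A stand-alone sketch of the same extraction for 8-bit lanes (plain C++, invented names):

#include <cstdint>

// Extract signed 8-bit lane j from a packed 32-bit word: shift logically by
// j * 8 (Word32Shr), then sign-extend the low byte (SignExtendWord8ToInt32).
int32_t ExtractI8Lane(int32_t packed, int j) {
  uint32_t shifted = static_cast<uint32_t>(packed) >> (j * 8);
  return static_cast<int32_t>(static_cast<int8_t>(shifted & 0xff));
}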
@@ -2284,89 +2453,239 @@ void SimdScalarLowering::Int32ToInt64(Node** replacements, Node** result) {
}
Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
+ // Operations like extract lane, bitmask, any_true, and all_true replace a
+ // SIMD node with a scalar. They won't be handled correctly here; they should
+ // be special-cased and replaced with the appropriate scalar.
+ DCHECK_LT(1, ReplacementCount(node));
+
Node** replacements = GetReplacements(node);
- if (ReplacementType(node) == type) {
- return GetReplacements(node);
+ if (type == ReplacementType(node)) {
+ return replacements;
}
+
int num_lanes = NumLanes(type);
Node** result = zone()->NewArray<Node*>(num_lanes);
- if (type == SimdType::kInt64x2) {
- if (ReplacementType(node) == SimdType::kInt32x4) {
- Int32ToInt64(replacements, result);
- } else if (ReplacementType(node) == SimdType::kFloat64x2) {
- Float64ToInt64(replacements, result);
- } else {
- UNIMPLEMENTED();
+
+ switch (type) {
+ case SimdType::kInt64x2: {
+ switch (ReplacementType(node)) {
+ case SimdType::kInt64x2: {
+ UNREACHABLE();
+ }
+ case SimdType::kInt32x4: {
+ Int32ToInt64(replacements, result);
+ break;
+ }
+ case SimdType::kInt16x8: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ SmallerIntToInt32<int16_t>(replacements, to_int32);
+ Int32ToInt64(to_int32, result);
+ break;
+ }
+ case SimdType::kInt8x16: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ SmallerIntToInt32<int8_t>(replacements, to_int32);
+ Int32ToInt64(to_int32, result);
+ break;
+ }
+ case SimdType::kFloat64x2: {
+ Float64ToInt64(replacements, result);
+ break;
+ }
+ case SimdType::kFloat32x4: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Float32ToInt32(replacements, to_int32);
+ Int32ToInt64(to_int32, result);
+ break;
+ }
+ }
+ break;
}
- } else if (type == SimdType::kInt32x4) {
- if (ReplacementType(node) == SimdType::kInt64x2) {
- Int64ToInt32(replacements, result);
- } else if (ReplacementType(node) == SimdType::kFloat64x2) {
- Node** float64_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
- Float64ToInt64(replacements, float64_to_int64);
- Int64ToInt32(float64_to_int64, result);
- } else if (ReplacementType(node) == SimdType::kFloat32x4) {
- Float32ToInt32(replacements, result);
- } else if (ReplacementType(node) == SimdType::kInt16x8) {
- SmallerIntToInt32<int16_t>(replacements, result);
- } else if (ReplacementType(node) == SimdType::kInt8x16) {
- SmallerIntToInt32<int8_t>(replacements, result);
- } else {
- UNREACHABLE();
+ case SimdType::kInt32x4: {
+ switch (ReplacementType(node)) {
+ case SimdType::kInt64x2: {
+ Int64ToInt32(replacements, result);
+ break;
+ }
+ case SimdType::kInt32x4: {
+ UNREACHABLE();
+ }
+ case SimdType::kInt16x8: {
+ SmallerIntToInt32<int16_t>(replacements, result);
+ break;
+ }
+ case SimdType::kInt8x16: {
+ SmallerIntToInt32<int8_t>(replacements, result);
+ break;
+ }
+ case SimdType::kFloat64x2: {
+ Node** float64_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Float64ToInt64(replacements, float64_to_int64);
+ Int64ToInt32(float64_to_int64, result);
+ break;
+ }
+ case SimdType::kFloat32x4: {
+ Float32ToInt32(replacements, result);
+ break;
+ }
+ }
+ break;
}
- } else if (type == SimdType::kFloat64x2) {
- if (ReplacementType(node) == SimdType::kInt64x2) {
- Int64ToFloat64(replacements, result);
- } else if (ReplacementType(node) == SimdType::kInt32x4) {
- Node** int32_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
- Int32ToInt64(replacements, int32_to_int64);
- Int64ToFloat64(int32_to_int64, result);
- } else {
- UNIMPLEMENTED();
+ case SimdType::kInt16x8: {
+ switch (ReplacementType(node)) {
+ case SimdType::kInt64x2: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Int64ToInt32(replacements, to_int32);
+ Int32ToSmallerInt<int16_t>(to_int32, result);
+ break;
+ }
+ case SimdType::kInt32x4: {
+ Int32ToSmallerInt<int16_t>(replacements, result);
+ break;
+ }
+ case SimdType::kInt16x8: {
+ UNREACHABLE();
+ }
+ case SimdType::kInt8x16: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ SmallerIntToInt32<int8_t>(replacements, to_int32);
+ Int32ToSmallerInt<int16_t>(to_int32, result);
+ break;
+ }
+ case SimdType::kFloat64x2: {
+ Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Float64ToInt64(replacements, to_int64);
+ Int64ToInt32(to_int64, to_int32);
+ Int32ToSmallerInt<int16_t>(to_int32, result);
+ break;
+ }
+ case SimdType::kFloat32x4: {
+ Node** float32_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Float32ToInt32(replacements, float32_to_int32);
+ Int32ToSmallerInt<int16_t>(float32_to_int32, result);
+ break;
+ }
+ }
+ break;
}
- } else if (type == SimdType::kFloat32x4) {
- if (ReplacementType(node) == SimdType::kFloat64x2) {
- Node** float64_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
- Node** int64_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Float64ToInt64(replacements, float64_to_int64);
- Int64ToInt32(float64_to_int64, int64_to_int32);
- Int32ToFloat32(int64_to_int32, result);
- } else if (ReplacementType(node) == SimdType::kInt32x4) {
- Int32ToFloat32(replacements, result);
- } else if (ReplacementType(node) == SimdType::kInt16x8) {
- UNIMPLEMENTED();
- } else if (ReplacementType(node) == SimdType::kInt8x16) {
- SmallerIntToInt32<int8_t>(replacements, result);
- Int32ToFloat32(result, result);
- } else {
- UNREACHABLE();
+ case SimdType::kInt8x16: {
+ switch (ReplacementType(node)) {
+ case SimdType::kInt64x2: {
+ Node** int64_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Int64ToInt32(replacements, int64_to_int32);
+ Int32ToSmallerInt<int8_t>(int64_to_int32, result);
+ break;
+ }
+ case SimdType::kInt32x4: {
+ Int32ToSmallerInt<int8_t>(replacements, result);
+ break;
+ }
+ case SimdType::kInt16x8: {
+ Node** int16_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ SmallerIntToInt32<int16_t>(replacements, int16_to_int32);
+ Int32ToSmallerInt<int8_t>(int16_to_int32, result);
+ break;
+ }
+ case SimdType::kInt8x16: {
+ UNREACHABLE();
+ }
+ case SimdType::kFloat64x2: {
+ Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Float64ToInt64(replacements, to_int64);
+ Int64ToInt32(to_int64, to_int32);
+ Int32ToSmallerInt<int8_t>(to_int32, result);
+ break;
+ }
+ case SimdType::kFloat32x4: {
+ Node** float32_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Float32ToInt32(replacements, float32_to_int32);
+ Int32ToSmallerInt<int8_t>(float32_to_int32, result);
+ break;
+ }
+ }
+ break;
}
- } else if (type == SimdType::kInt16x8) {
- if (ReplacementType(node) == SimdType::kInt32x4) {
- Int32ToSmallerInt<int16_t>(replacements, result);
- } else if (ReplacementType(node) == SimdType::kFloat32x4) {
- Node** float32_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Float32ToInt32(replacements, float32_to_int32);
- Int32ToSmallerInt<int16_t>(float32_to_int32, result);
- } else {
- UNREACHABLE();
+ case SimdType::kFloat64x2: {
+ switch (ReplacementType(node)) {
+ case SimdType::kInt64x2: {
+ Int64ToFloat64(replacements, result);
+ break;
+ }
+ case SimdType::kInt32x4: {
+ Node** int32_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Int32ToInt64(replacements, int32_to_int64);
+ Int64ToFloat64(int32_to_int64, result);
+ break;
+ }
+ case SimdType::kInt16x8: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ SmallerIntToInt32<int16_t>(replacements, to_int32);
+ Int32ToInt64(to_int32, to_int64);
+ Int64ToFloat64(to_int64, result);
+ break;
+ }
+ case SimdType::kInt8x16: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ SmallerIntToInt32<int8_t>(replacements, to_int32);
+ Int32ToInt64(to_int32, to_int64);
+ Int64ToFloat64(to_int64, result);
+ break;
+ }
+ case SimdType::kFloat64x2: {
+ UNREACHABLE();
+ }
+ case SimdType::kFloat32x4: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Float32ToInt32(replacements, to_int32);
+ Int32ToInt64(to_int32, to_int64);
+ Int64ToFloat64(to_int64, result);
+ break;
+ }
+ }
+ break;
}
- } else if (type == SimdType::kInt8x16) {
- if (ReplacementType(node) == SimdType::kInt64x2) {
- Node** int64_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Int64ToInt32(replacements, int64_to_int32);
- Int32ToSmallerInt<int8_t>(int64_to_int32, result);
- } else if (ReplacementType(node) == SimdType::kInt32x4) {
- Int32ToSmallerInt<int8_t>(replacements, result);
- } else if (ReplacementType(node) == SimdType::kInt16x8) {
- Node** int16_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- SmallerIntToInt32<int16_t>(replacements, int16_to_int32);
- Int32ToSmallerInt<int8_t>(int16_to_int32, result);
- } else {
- UNIMPLEMENTED();
+ case SimdType::kFloat32x4: {
+ switch (ReplacementType(node)) {
+ case SimdType::kInt64x2: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Int64ToInt32(replacements, to_int32);
+ Int32ToFloat32(to_int32, result);
+ break;
+ }
+ case SimdType::kInt32x4: {
+ Int32ToFloat32(replacements, result);
+ break;
+ }
+ case SimdType::kInt16x8: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ SmallerIntToInt32<int16_t>(replacements, to_int32);
+ Int32ToFloat32(to_int32, result);
+ break;
+ }
+ case SimdType::kInt8x16: {
+ SmallerIntToInt32<int8_t>(replacements, result);
+ Int32ToFloat32(result, result);
+ break;
+ }
+ case SimdType::kFloat64x2: {
+ Node** float64_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Node** int64_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Float64ToInt64(replacements, float64_to_int64);
+ Int64ToInt32(float64_to_int64, int64_to_int32);
+ Int32ToFloat32(int64_to_int32, result);
+ break;
+ }
+ case SimdType::kFloat32x4: {
+ UNREACHABLE();
+ }
+ }
+ break;
}
- } else {
- UNREACHABLE();
}
return result;
}
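
The rewritten conversion logic above replaces the old if/else chains with exhaustive nested switches over the requested type and the replacement type, and every cross-type case is routed through 32-bit lanes (and, for 64-bit targets, from there through Int32ToInt64). The following is an illustrative, standalone sketch of that repacking idea only, operating on plain lane arrays rather than TurboFan nodes, and assuming little-endian lane pairing:

    // Illustrative only: reinterpret the same 128 bits between lane widths,
    // mirroring how the lowering routes conversions through 32-bit lanes.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Four 32-bit lanes -> two 64-bit lanes (little-endian pairing assumed).
    static void Int32ToInt64(const uint32_t in[4], uint64_t out[2]) {
      std::memcpy(out, in, 16);
    }

    // Four float lanes -> four 32-bit integer lanes (per-lane bitcast).
    static void Float32ToInt32(const float in[4], uint32_t out[4]) {
      std::memcpy(out, in, 16);
    }

    // A kFloat32x4 replacement viewed as kInt64x2: float32 -> int32 -> int64,
    // the same two-step route taken in the switch above.
    static void Float32x4ToInt64x2(const float in[4], uint64_t out[2]) {
      uint32_t as_int32[4];
      Float32ToInt32(in, as_int32);
      Int32ToInt64(as_int32, out);
    }

    int main() {
      const float lanes[4] = {1.0f, 2.0f, 3.0f, 4.0f};
      uint64_t packed[2];
      Float32x4ToInt64x2(lanes, packed);
      std::printf("%016llx %016llx\n",
                  static_cast<unsigned long long>(packed[0]),
                  static_cast<unsigned long long>(packed[1]));
    }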
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index b86071f0ae..c4ba1e3019 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -119,6 +119,8 @@ class SimdScalarLowering {
MachineType MachineTypeFrom(SimdType simdType);
void LowerBitMaskOp(Node* node, SimdType rep_type, int msb_index);
void LowerAllTrueOp(Node* node, SimdType rep_type);
+ void LowerFloatPseudoMinMax(Node* node, const Operator* op, bool is_max,
+ SimdType type);
MachineGraph* const mcgraph_;
NodeMarker<State> state_;
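
The new LowerFloatPseudoMinMax hook scalarizes the f32x4/f64x2 pmin and pmax operations. Under the usual Wasm definition these are not IEEE min/max but plain compare-and-select operations (pmin(a, b) = b < a ? b : a, pmax(a, b) = a < b ? b : a), which is what makes a per-lane lowering straightforward. A scalar sketch of that semantics, independent of the TurboFan node machinery:

    // Per-lane pseudo-min/max as a compare-and-select, assuming the standard
    // Wasm pmin/pmax definition; unlike IEEE minNum/maxNum, NaN propagation
    // and signed-zero handling simply fall out of the plain '<' comparison.
    #include <cstdio>

    template <typename F>
    F PseudoMinMax(F a, F b, bool is_max) {
      return is_max ? (a < b ? b : a)   // pmax
                    : (b < a ? b : a);  // pmin
    }

    int main() {
      std::printf("pmin(0.0, -0.0) = %g\n", PseudoMinMax(0.0, -0.0, false));
      std::printf("pmax(1.5, 2.5)  = %g\n", PseudoMinMax(1.5, 2.5, true));
    }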
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 2842259a2e..ef56d56e44 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -293,7 +293,7 @@ class RepresentationSelector {
RepresentationChanger* changer,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- TickCounter* tick_counter)
+ TickCounter* tick_counter, Linkage* linkage)
: jsgraph_(jsgraph),
zone_(zone),
might_need_revisit_(zone),
@@ -310,7 +310,8 @@ class RepresentationSelector {
node_origins_(node_origins),
type_cache_(TypeCache::Get()),
op_typer_(broker, graph_zone()),
- tick_counter_(tick_counter) {
+ tick_counter_(tick_counter),
+ linkage_(linkage) {
}
void ResetNodeInfoState() {
@@ -1362,8 +1363,8 @@ class RepresentationSelector {
return kPointerWriteBarrier;
}
NumberMatcher m(value);
- if (m.HasValue()) {
- if (IsSmiDouble(m.Value())) {
+ if (m.HasResolvedValue()) {
+ if (IsSmiDouble(m.ResolvedValue())) {
// Storing a smi doesn't need a write barrier.
return kNoWriteBarrier;
}
@@ -1408,7 +1409,6 @@ class RepresentationSelector {
IsSomePositiveOrderedNumber(input1_type)
? CheckForMinusZeroMode::kDontCheckForMinusZero
: CheckForMinusZeroMode::kCheckForMinusZero;
-
NodeProperties::ChangeOp(node, simplified()->CheckedInt32Mul(mz_mode));
}
@@ -1452,6 +1452,13 @@ class RepresentationSelector {
Type left_feedback_type = TypeOf(node->InputAt(0));
Type right_feedback_type = TypeOf(node->InputAt(1));
+
+ // Using Signed32 as restriction type amounts to promising there won't be
+ // signed overflow. This is incompatible with relying on a Word32
+ // truncation in order to skip the overflow check.
+ Type const restriction =
+ truncation.IsUsedAsWord32() ? Type::Any() : Type::Signed32();
+
// Handle the case when no int32 checks on inputs are necessary (but
// an overflow check is needed on the output). Note that we do not
// have to do any check if at most one side can be minus zero. For
@@ -1465,7 +1472,7 @@ class RepresentationSelector {
right_upper.Is(Type::Signed32OrMinusZero()) &&
(left_upper.Is(Type::Signed32()) || right_upper.Is(Type::Signed32()))) {
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32, Type::Signed32());
+ MachineRepresentation::kWord32, restriction);
} else {
// If the output's truncation is identify-zeros, we can pass it
// along. Moreover, if the operation is addition and we know the
@@ -1485,8 +1492,9 @@ class RepresentationSelector {
UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(),
kIdentifyZeros);
VisitBinop<T>(node, left_use, right_use, MachineRepresentation::kWord32,
- Type::Signed32());
+ restriction);
}
+
if (lower<T>()) {
if (truncation.IsUsedAsWord32() ||
!CanOverflowSigned32(node->op(), left_feedback_type,
@@ -1745,15 +1753,15 @@ class RepresentationSelector {
return UseInfo::Bool();
case CTypeInfo::Type::kInt32:
case CTypeInfo::Type::kUint32:
- case CTypeInfo::Type::kFloat32:
return UseInfo::CheckedNumberAsWord32(feedback);
// TODO(mslekova): We deopt for unsafe integers, but ultimately we want
// to make this less restrictive in order to stay on the fast path.
case CTypeInfo::Type::kInt64:
case CTypeInfo::Type::kUint64:
return UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, feedback);
+ case CTypeInfo::Type::kFloat32:
case CTypeInfo::Type::kFloat64:
- return UseInfo::CheckedNumberAsFloat64(kIdentifyZeros, feedback);
+ return UseInfo::CheckedNumberAsFloat64(kDistinguishZeros, feedback);
case CTypeInfo::Type::kV8Value:
return UseInfo::AnyTagged();
}
@@ -1838,9 +1846,10 @@ class RepresentationSelector {
// here, otherwise the input conversion will fail.
return VisitLeaf<T>(node, MachineRepresentation::kTagged);
case IrOpcode::kParameter:
- // TODO(titzer): use representation from linkage.
return VisitUnop<T>(node, UseInfo::None(),
- MachineRepresentation::kTagged);
+ linkage()
+ ->GetParameterType(ParameterIndexOf(node->op()))
+ .representation());
case IrOpcode::kInt32Constant:
return VisitLeaf<T>(node, MachineRepresentation::kWord32);
case IrOpcode::kInt64Constant:
@@ -2828,7 +2837,16 @@ class RepresentationSelector {
return VisitUnop<T>(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
}
- case IrOpcode::kTierUpCheck:
+ case IrOpcode::kTierUpCheck: {
+ ProcessInput<T>(node, 0, UseInfo::AnyTagged());
+ ProcessInput<T>(node, 1, UseInfo::AnyTagged());
+ ProcessInput<T>(node, 2, UseInfo::AnyTagged());
+ ProcessInput<T>(node, 3, UseInfo::TruncatingWord32());
+ ProcessInput<T>(node, 4, UseInfo::AnyTagged());
+ ProcessRemainingInputs<T>(node, 5);
+ SetOutput<T>(node, MachineRepresentation::kNone);
+ return;
+ }
case IrOpcode::kUpdateInterruptBudget: {
ProcessInput<T>(node, 0, UseInfo::AnyTagged());
ProcessRemainingInputs<T>(node, 1);
@@ -3836,6 +3854,7 @@ class RepresentationSelector {
TypeCache const* type_cache_;
OperationTyper op_typer_; // helper for the feedback typer
TickCounter* const tick_counter_;
+ Linkage* const linkage_;
NodeInfo* GetInfo(Node* node) {
DCHECK(node->id() < count_);
@@ -3843,6 +3862,7 @@ class RepresentationSelector {
}
Zone* zone() { return zone_; }
Zone* graph_zone() { return jsgraph_->zone(); }
+ Linkage* linkage() { return linkage_; }
};
// Template specializations
@@ -4006,7 +4026,8 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level,
- TickCounter* tick_counter)
+ TickCounter* tick_counter,
+ Linkage* linkage)
: jsgraph_(jsgraph),
broker_(broker),
zone_(zone),
@@ -4014,13 +4035,14 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
source_positions_(source_positions),
node_origins_(node_origins),
poisoning_level_(poisoning_level),
- tick_counter_(tick_counter) {}
+ tick_counter_(tick_counter),
+ linkage_(linkage) {}
void SimplifiedLowering::LowerAllNodes() {
RepresentationChanger changer(jsgraph(), broker_);
RepresentationSelector selector(jsgraph(), broker_, zone_, &changer,
source_positions_, node_origins_,
- tick_counter_);
+ tick_counter_, linkage_);
selector.Run(this);
}
@@ -4279,7 +4301,7 @@ Node* SimplifiedLowering::Int32Div(Node* const node) {
return graph()->NewNode(machine()->Int32Sub(), zero, lhs);
} else if (m.right().Is(0)) {
return rhs;
- } else if (machine()->Int32DivIsSafe() || m.right().HasValue()) {
+ } else if (machine()->Int32DivIsSafe() || m.right().HasResolvedValue()) {
return graph()->NewNode(machine()->Int32Div(), lhs, rhs, graph()->start());
}
@@ -4350,7 +4372,7 @@ Node* SimplifiedLowering::Int32Mod(Node* const node) {
if (m.right().Is(-1) || m.right().Is(0)) {
return zero;
- } else if (m.right().HasValue()) {
+ } else if (m.right().HasResolvedValue()) {
return graph()->NewNode(machine()->Int32Mod(), lhs, rhs, graph()->start());
}
@@ -4463,7 +4485,7 @@ Node* SimplifiedLowering::Uint32Div(Node* const node) {
if (m.right().Is(0)) {
return zero;
- } else if (machine()->Uint32DivIsSafe() || m.right().HasValue()) {
+ } else if (machine()->Uint32DivIsSafe() || m.right().HasResolvedValue()) {
return graph()->NewNode(machine()->Uint32Div(), lhs, rhs, graph()->start());
}
@@ -4482,7 +4504,7 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
if (m.right().Is(0)) {
return zero;
- } else if (m.right().HasValue()) {
+ } else if (m.right().HasResolvedValue()) {
return graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, graph()->start());
}
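
One behavioural change in the simplified-lowering.cc hunks above is the restriction type for checked int32 add/sub: when the result is only used under a Word32 truncation, the overflow check may legitimately be skipped, so the output can no longer be advertised as Signed32 to later typing passes. A small standalone illustration of why those two promises are incompatible, using plain wrapping arithmetic:

    // If the overflow check is skipped because every use truncates to Word32,
    // the produced value need not equal the mathematical sum: INT32_MAX + 1
    // wraps to INT32_MIN, so a Signed32 (no-overflow) restriction would lie.
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    int32_t AddAsWord32(int32_t a, int32_t b) {
      // Word32 semantics: wrap modulo 2^32 (well-defined on unsigned).
      return static_cast<int32_t>(static_cast<uint32_t>(a) +
                                  static_cast<uint32_t>(b));
    }

    int main() {
      int32_t x = std::numeric_limits<int32_t>::max();
      // A later pass that still believed the result carried a no-overflow
      // guarantee could mis-optimize around this wrapped value.
      std::printf("%d + 1 -> %d under Word32 truncation\n", x,
                  AddAsWord32(x, 1));
    }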
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 414e3588d7..f38d3df132 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -30,7 +30,7 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SourcePositionTable* source_position,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level,
- TickCounter* tick_counter);
+ TickCounter* tick_counter, Linkage* linkage);
~SimplifiedLowering() = default;
void LowerAllNodes();
@@ -72,6 +72,7 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
PoisoningMitigationLevel poisoning_level_;
TickCounter* const tick_counter_;
+ Linkage* const linkage_;
Node* Float64Round(Node* const node);
Node* Float64Sign(Node* const node);
@@ -98,6 +99,7 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
CommonOperatorBuilder* common() { return jsgraph()->common(); }
MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
+ Linkage* linkage() { return linkage_; }
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index 3a5b3c6ec6..d2591b5502 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -20,8 +20,8 @@ namespace {
Decision DecideObjectIsSmi(Node* const input) {
NumberMatcher m(input);
- if (m.HasValue()) {
- return IsSmiDouble(m.Value()) ? Decision::kTrue : Decision::kFalse;
+ if (m.HasResolvedValue()) {
+ return IsSmiDouble(m.ResolvedValue()) ? Decision::kTrue : Decision::kFalse;
}
if (m.IsAllocate()) return Decision::kFalse;
if (m.IsChangeBitToTagged()) return Decision::kFalse;
@@ -44,7 +44,6 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
switch (node->opcode()) {
case IrOpcode::kBooleanNot: {
- // TODO(neis): Provide HeapObjectRefMatcher?
HeapObjectMatcher m(node->InputAt(0));
if (m.Is(factory()->true_value())) return ReplaceBoolean(false);
if (m.Is(factory()->false_value())) return ReplaceBoolean(true);
@@ -60,7 +59,7 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeTaggedToBit: {
HeapObjectMatcher m(node->InputAt(0));
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
return ReplaceInt32(m.Ref(broker()).BooleanValue());
}
if (m.IsChangeBitToTagged()) return Replace(m.InputAt(0));
@@ -68,14 +67,14 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeFloat64ToTagged: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceNumber(m.Value());
+ if (m.HasResolvedValue()) return ReplaceNumber(m.ResolvedValue());
if (m.IsChangeTaggedToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kChangeInt31ToTaggedSigned:
case IrOpcode::kChangeInt32ToTagged: {
Int32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceNumber(m.Value());
+ if (m.HasResolvedValue()) return ReplaceNumber(m.ResolvedValue());
if (m.IsChangeTaggedToInt32() || m.IsChangeTaggedSignedToInt32()) {
return Replace(m.InputAt(0));
}
@@ -84,7 +83,7 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
case IrOpcode::kChangeTaggedToFloat64:
case IrOpcode::kTruncateTaggedToFloat64: {
NumberMatcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(m.Value());
+ if (m.HasResolvedValue()) return ReplaceFloat64(m.ResolvedValue());
if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
return Replace(m.node()->InputAt(0));
}
@@ -99,7 +98,8 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
case IrOpcode::kChangeTaggedSignedToInt32:
case IrOpcode::kChangeTaggedToInt32: {
NumberMatcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt32(DoubleToInt32(m.ResolvedValue()));
if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
return Change(node, machine()->ChangeFloat64ToInt32(), m.InputAt(0));
}
@@ -110,7 +110,8 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeTaggedToUint32: {
NumberMatcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceUint32(DoubleToUint32(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceUint32(DoubleToUint32(m.ResolvedValue()));
if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
return Change(node, machine()->ChangeFloat64ToUint32(), m.InputAt(0));
}
@@ -119,12 +120,14 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeUint32ToTagged: {
Uint32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceNumber(FastUI2D(m.ResolvedValue()));
break;
}
case IrOpcode::kTruncateTaggedToWord32: {
NumberMatcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt32(DoubleToInt32(m.ResolvedValue()));
if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged() ||
m.IsChangeUint32ToTagged()) {
return Replace(m.InputAt(0));
@@ -136,8 +139,9 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kCheckedFloat64ToInt32: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue() && IsInt32Double(m.Value())) {
- Node* value = jsgraph()->Int32Constant(static_cast<int32_t>(m.Value()));
+ if (m.HasResolvedValue() && IsInt32Double(m.ResolvedValue())) {
+ Node* value =
+ jsgraph()->Int32Constant(static_cast<int32_t>(m.ResolvedValue()));
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -212,7 +216,8 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kNumberAbs: {
NumberMatcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceNumber(std::fabs(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceNumber(std::fabs(m.ResolvedValue()));
break;
}
case IrOpcode::kReferenceEqual: {
@@ -224,26 +229,25 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
// (x + a) + b => x + (a + b) where a and b are constants and have the
// same sign.
Int32BinopMatcher m(node);
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
Node* checked_int32_add = m.left().node();
if (checked_int32_add->opcode() == IrOpcode::kCheckedInt32Add) {
Int32BinopMatcher n(checked_int32_add);
- if (n.right().HasValue() &&
- (n.right().Value() >= 0) == (m.right().Value() >= 0)) {
+ if (n.right().HasResolvedValue() &&
+ (n.right().ResolvedValue() >= 0) ==
+ (m.right().ResolvedValue() >= 0)) {
int32_t val;
bool overflow = base::bits::SignedAddOverflow32(
- n.right().Value(), m.right().Value(), &val);
+ n.right().ResolvedValue(), m.right().ResolvedValue(), &val);
if (!overflow) {
- bool has_no_other_value_uses = true;
+ bool has_no_other_uses = true;
for (Edge edge : checked_int32_add->use_edges()) {
- if (!edge.from()->IsDead() &&
- !NodeProperties::IsEffectEdge(edge) &&
- edge.from() != node) {
- has_no_other_value_uses = false;
+ if (!edge.from()->IsDead() && edge.from() != node) {
+ has_no_other_uses = false;
break;
}
}
- if (has_no_other_value_uses) {
+ if (has_no_other_uses) {
node->ReplaceInput(0, n.left().node());
node->ReplaceInput(1, jsgraph()->Int32Constant(val));
RelaxEffectsAndControls(checked_int32_add);
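
Besides the mechanical HasValue()/Value() to HasResolvedValue()/ResolvedValue() rename, the CheckedInt32Add hunk above reassociates (x + a) + b into x + (a + b) when a and b are constants of the same sign, a + b does not overflow, and the inner add has no other live uses. A minimal sketch of just the constant-combining guard, using __builtin_add_overflow as a stand-in for base::bits::SignedAddOverflow32:

    // Fold two constant addends of a nested checked add, keeping the
    // same-sign and no-overflow guards used by the reducer.
    #include <cstdint>
    #include <cstdio>

    bool CombineCheckedAddConstants(int32_t a, int32_t b, int32_t* out) {
      if ((a >= 0) != (b >= 0)) return false;  // must have the same sign
      int32_t sum;
      if (__builtin_add_overflow(a, b, &sum)) return false;  // would overflow
      *out = sum;  // (x + a) + b can become x + sum
      return true;
    }

    int main() {
      int32_t sum;
      if (CombineCheckedAddConstants(3, 5, &sum)) {
        std::printf("(x + 3) + 5  =>  x + %d\n", sum);
      }
      if (!CombineCheckedAddConstants(3, -5, &sum)) {
        std::printf("(x + 3) + -5 is left alone (mixed signs)\n");
      }
    }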
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index b7069b44f3..650de7fb55 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -29,6 +29,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph,
JSHeapBroker* broker);
~SimplifiedOperatorReducer() final;
+ SimplifiedOperatorReducer(const SimplifiedOperatorReducer&) = delete;
+ SimplifiedOperatorReducer& operator=(const SimplifiedOperatorReducer&) =
+ delete;
const char* reducer_name() const override {
return "SimplifiedOperatorReducer";
@@ -57,8 +60,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
JSGraph* const jsgraph_;
JSHeapBroker* const broker_;
-
- DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
};
} // namespace compiler
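
This header, like several others in the patch (TypeNarrowingReducer, TypedOptimization, Typer, Verifier, SimplifiedOperatorBuilder), swaps the DISALLOW_COPY_AND_ASSIGN macro for explicitly deleted copy members: the effect is the same, but the intent is now visible in the public interface rather than hidden behind a macro in the private section. A minimal sketch of the two styles, with the macro spelled out by hand for illustration:

    // Old style: a macro in the private section deletes the copy members.
    #define DISALLOW_COPY_AND_ASSIGN_SKETCH(TypeName) \
      TypeName(const TypeName&) = delete;             \
      TypeName& operator=(const TypeName&) = delete

    class OldStyle {
     public:
      OldStyle() = default;
     private:
      DISALLOW_COPY_AND_ASSIGN_SKETCH(OldStyle);
    };

    // New style: the deleted members are declared directly, in public view.
    class NewStyle {
     public:
      NewStyle() = default;
      NewStyle(const NewStyle&) = delete;
      NewStyle& operator=(const NewStyle&) = delete;
    };

    int main() {
      NewStyle a;
      // NewStyle b = a;  // would not compile: copy constructor is deleted
      (void)a;
    }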
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 33bd71d221..db6d8257b1 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -294,7 +294,7 @@ bool operator==(DynamicCheckMapsParameters const& lhs,
DCHECK_IMPLIES(lhs.feedback() == rhs.feedback(),
lhs.flags() == rhs.flags() && lhs.state() == rhs.state() &&
lhs.handler().address() == rhs.handler().address() &&
- lhs.map().address() == rhs.map().address());
+ lhs.maps() == rhs.maps());
return lhs.feedback() == rhs.feedback();
}
@@ -308,7 +308,7 @@ size_t hash_value(DynamicCheckMapsParameters const& p) {
std::ostream& operator<<(std::ostream& os,
DynamicCheckMapsParameters const& p) {
return os << p.handler() << ", " << p.feedback() << "," << p.state() << ","
- << p.flags() << "," << p.map().address();
+ << p.flags() << "," << p.maps();
}
DynamicCheckMapsParameters const& DynamicCheckMapsParametersOf(
@@ -1325,7 +1325,7 @@ const Operator* SimplifiedOperatorBuilder::UpdateInterruptBudget(int delta) {
const Operator* SimplifiedOperatorBuilder::TierUpCheck() {
return zone()->New<Operator>(IrOpcode::kTierUpCheck,
Operator::kNoThrow | Operator::kNoDeopt,
- "TierUpCheck", 1, 1, 1, 0, 1, 0);
+ "TierUpCheck", 5, 1, 1, 0, 1, 0);
}
const Operator* SimplifiedOperatorBuilder::AssertType(Type type) {
@@ -1487,10 +1487,9 @@ const Operator* SimplifiedOperatorBuilder::CheckMaps(
}
const Operator* SimplifiedOperatorBuilder::DynamicCheckMaps(
- CheckMapsFlags flags, Handle<Object> handler, MaybeHandle<Map> maybe_map,
- const FeedbackSource& feedback) {
- DynamicCheckMapsParameters const parameters(flags, handler, maybe_map,
- feedback);
+ CheckMapsFlags flags, Handle<Object> handler,
+ ZoneHandleSet<Map> const& maps, const FeedbackSource& feedback) {
+ DynamicCheckMapsParameters const parameters(flags, handler, maps, feedback);
return zone()->New<Operator1<DynamicCheckMapsParameters>>( // --
IrOpcode::kDynamicCheckMaps, // opcode
Operator::kNoThrow | Operator::kNoWrite, // flags
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index eab865fd59..e130674c91 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -85,6 +85,9 @@ struct FieldAccess {
ConstFieldInfo const_field_info; // the constness of this access, and the
// field owner map, if the access is const
bool is_store_in_literal; // originates from a kStoreInLiteral access
+#ifdef V8_HEAP_SANDBOX
+ ExternalPointerTag external_pointer_tag = kExternalPointerNullTag;
+#endif
FieldAccess()
: base_is_tagged(kTaggedBase),
@@ -101,7 +104,12 @@ struct FieldAccess {
WriteBarrierKind write_barrier_kind,
LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe,
ConstFieldInfo const_field_info = ConstFieldInfo::None(),
- bool is_store_in_literal = false)
+ bool is_store_in_literal = false
+#ifdef V8_HEAP_SANDBOX
+ ,
+ ExternalPointerTag external_pointer_tag = kExternalPointerNullTag
+#endif
+ )
: base_is_tagged(base_is_tagged),
offset(offset),
name(name),
@@ -111,7 +119,12 @@ struct FieldAccess {
write_barrier_kind(write_barrier_kind),
load_sensitivity(load_sensitivity),
const_field_info(const_field_info),
- is_store_in_literal(is_store_in_literal) {
+ is_store_in_literal(is_store_in_literal)
+#ifdef V8_HEAP_SANDBOX
+ ,
+ external_pointer_tag(external_pointer_tag)
+#endif
+ {
DCHECK_GE(offset, 0);
}
@@ -432,25 +445,22 @@ class DynamicCheckMapsParameters final {
enum ICState { kMonomorphic, kPolymorphic };
DynamicCheckMapsParameters(CheckMapsFlags flags, Handle<Object> handler,
- MaybeHandle<Map> maybe_map,
+ ZoneHandleSet<Map> const& maps,
const FeedbackSource& feedback)
- : flags_(flags),
- handler_(handler),
- maybe_map_(maybe_map),
- feedback_(feedback) {}
+ : flags_(flags), handler_(handler), maps_(maps), feedback_(feedback) {}
CheckMapsFlags flags() const { return flags_; }
Handle<Object> handler() const { return handler_; }
- MaybeHandle<Map> map() const { return maybe_map_; }
+ ZoneHandleSet<Map> const& maps() const { return maps_; }
FeedbackSource const& feedback() const { return feedback_; }
ICState state() const {
- return maybe_map_.is_null() ? ICState::kPolymorphic : ICState::kMonomorphic;
+ return maps_.size() == 1 ? ICState::kMonomorphic : ICState::kPolymorphic;
}
private:
CheckMapsFlags const flags_;
Handle<Object> const handler_;
- MaybeHandle<Map> const maybe_map_;
+ ZoneHandleSet<Map> const maps_;
FeedbackSource const feedback_;
};
@@ -708,6 +718,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
: public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit SimplifiedOperatorBuilder(Zone* zone);
+ SimplifiedOperatorBuilder(const SimplifiedOperatorBuilder&) = delete;
+ SimplifiedOperatorBuilder& operator=(const SimplifiedOperatorBuilder&) =
+ delete;
const Operator* BooleanNot();
@@ -888,7 +901,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>,
const FeedbackSource& = FeedbackSource());
const Operator* DynamicCheckMaps(CheckMapsFlags flags, Handle<Object> handler,
- MaybeHandle<Map> map,
+ ZoneHandleSet<Map> const& maps,
const FeedbackSource& feedback);
const Operator* CheckNotTaggedHole();
const Operator* CheckNumber(const FeedbackSource& feedback);
@@ -1054,8 +1067,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const SimplifiedOperatorGlobalCache& cache_;
Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorBuilder);
};
// Node wrappers.
@@ -1177,7 +1188,12 @@ class TierUpCheckNode final : public SimplifiedNodeWrapperBase {
CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kTierUpCheck);
}
-#define INPUTS(V) V(FeedbackVector, feedback_vector, 0, FeedbackVector)
+#define INPUTS(V) \
+ V(FeedbackVector, feedback_vector, 0, FeedbackVector) \
+ V(Target, target, 1, JSReceiver) \
+ V(NewTarget, new_target, 2, Object) \
+ V(InputCount, input_count, 3, UntaggedT) \
+ V(Context, context, 4, Context)
INPUTS(DEFINE_INPUT_ACCESSORS)
#undef INPUTS
};
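
DynamicCheckMapsParameters now carries a ZoneHandleSet<Map> instead of a single MaybeHandle<Map>, and the IC state is derived from the set size rather than from null-ness. A small sketch of that shape, using a plain vector as a stand-in for ZoneHandleSet (the names below are illustrative, not the V8 types):

    #include <cstdio>
    #include <utility>
    #include <vector>

    enum class ICState { kMonomorphic, kPolymorphic };

    // Stand-in for DynamicCheckMapsParameters: state() follows the number of
    // maps recorded by the feedback, exactly one map being the monomorphic case.
    class CheckMapsParams {
     public:
      explicit CheckMapsParams(std::vector<const void*> maps)
          : maps_(std::move(maps)) {}
      ICState state() const {
        return maps_.size() == 1 ? ICState::kMonomorphic
                                 : ICState::kPolymorphic;
      }
     private:
      std::vector<const void*> maps_;
    };

    int main() {
      int map_a = 0, map_b = 0;
      CheckMapsParams mono({&map_a});
      CheckMapsParams poly({&map_a, &map_b});
      std::printf("mono? %d, poly? %d\n",
                  mono.state() == ICState::kMonomorphic,
                  poly.state() == ICState::kPolymorphic);
    }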
diff --git a/deps/v8/src/compiler/type-narrowing-reducer.h b/deps/v8/src/compiler/type-narrowing-reducer.h
index 136f11626e..ab8c4a483c 100644
--- a/deps/v8/src/compiler/type-narrowing-reducer.h
+++ b/deps/v8/src/compiler/type-narrowing-reducer.h
@@ -21,6 +21,8 @@ class V8_EXPORT_PRIVATE TypeNarrowingReducer final
public:
TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker);
~TypeNarrowingReducer() final;
+ TypeNarrowingReducer(const TypeNarrowingReducer&) = delete;
+ TypeNarrowingReducer& operator=(const TypeNarrowingReducer&) = delete;
const char* reducer_name() const override { return "TypeNarrowingReducer"; }
@@ -33,8 +35,6 @@ class V8_EXPORT_PRIVATE TypeNarrowingReducer final
JSGraph* const jsgraph_;
OperationTyper op_typer_;
-
- DISALLOW_COPY_AND_ASSIGN(TypeNarrowingReducer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index abc88c4b8e..6905f4e36d 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -813,7 +813,7 @@ Reduction TypedOptimization::ReduceJSToNumberInput(Node* input) {
if (input_type.Is(Type::String())) {
HeapObjectMatcher m(input);
- if (m.HasValue() && m.Ref(broker()).IsString()) {
+ if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
double number;
ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index 336c29540d..be3f56f845 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -30,6 +30,8 @@ class V8_EXPORT_PRIVATE TypedOptimization final
TypedOptimization(Editor* editor, CompilationDependencies* dependencies,
JSGraph* jsgraph, JSHeapBroker* broker);
~TypedOptimization() override;
+ TypedOptimization(const TypedOptimization&) = delete;
+ TypedOptimization& operator=(const TypedOptimization&) = delete;
const char* reducer_name() const override { return "TypedOptimization"; }
@@ -89,8 +91,6 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Type const true_type_;
Type const false_type_;
TypeCache const* type_cache_;
-
- DISALLOW_COPY_AND_ASSIGN(TypedOptimization);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index a4996f3cc2..831263554a 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -576,6 +576,10 @@ Type Typer::Visitor::ObjectIsCallable(Type type, Typer* t) {
Type Typer::Visitor::ObjectIsConstructor(Type type, Typer* t) {
// TODO(turbofan): Introduce a Type::Constructor?
CHECK(!type.IsNone());
+ if (type.IsHeapConstant() &&
+ type.AsHeapConstant()->Ref().map().is_constructor()) {
+ return t->singleton_true_;
+ }
if (!type.Maybe(Type::Callable())) return t->singleton_false_;
return Type::Boolean();
}
@@ -1434,7 +1438,7 @@ Type Typer::Visitor::JSOrdinaryHasInstanceTyper(Type lhs, Type rhs, Typer* t) {
}
Type Typer::Visitor::TypeJSGetSuperConstructor(Node* node) {
- return Type::Callable();
+ return Type::NonInternal();
}
// JS context operators.
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 305470d724..d1b6afeaf9 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -31,6 +31,8 @@ class V8_EXPORT_PRIVATE Typer {
Typer(JSHeapBroker* broker, Flags flags, Graph* graph,
TickCounter* tick_counter);
~Typer();
+ Typer(const Typer&) = delete;
+ Typer& operator=(const Typer&) = delete;
void Run();
// TODO(bmeurer,jarin): Remove this once we have a notion of "roots" on Graph.
@@ -57,8 +59,6 @@ class V8_EXPORT_PRIVATE Typer {
Type singleton_false_;
Type singleton_true_;
-
- DISALLOW_COPY_AND_ASSIGN(Typer);
};
DEFINE_OPERATORS_FOR_FLAGS(Typer::Flags)
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 0daf20d78a..3594dd9cad 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -294,7 +294,6 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case BYTECODE_ARRAY_TYPE:
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
case ARRAY_BOILERPLATE_DESCRIPTION_TYPE:
- case DESCRIPTOR_ARRAY_TYPE:
case TRANSITION_ARRAY_TYPE:
case FEEDBACK_CELL_TYPE:
case CLOSURE_FEEDBACK_CELL_ARRAY_TYPE:
@@ -310,6 +309,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case EVAL_CONTEXT_TYPE:
case FUNCTION_CONTEXT_TYPE:
case MODULE_CONTEXT_TYPE:
+ case MODULE_REQUEST_TYPE:
case NATIVE_CONTEXT_TYPE:
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
@@ -329,50 +329,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
// Remaining instance types are unsupported for now. If any of them do
// require bit set types, they should get kOtherInternal.
- case FREE_SPACE_TYPE:
- case FILLER_TYPE:
- case ACCESS_CHECK_INFO_TYPE:
- case ASM_WASM_DATA_TYPE:
- case CALL_HANDLER_INFO_TYPE:
- case INTERCEPTOR_INFO_TYPE:
- case OBJECT_TEMPLATE_INFO_TYPE:
- case ALLOCATION_MEMENTO_TYPE:
- case ALIASED_ARGUMENTS_ENTRY_TYPE:
- case PROMISE_CAPABILITY_TYPE:
- case PROMISE_REACTION_TYPE:
- case CLASS_POSITIONS_TYPE:
- case DEBUG_INFO_TYPE:
- case STACK_FRAME_INFO_TYPE:
- case STACK_TRACE_FRAME_TYPE:
- case SMALL_ORDERED_HASH_MAP_TYPE:
- case SMALL_ORDERED_HASH_SET_TYPE:
- case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
- case PROTOTYPE_INFO_TYPE:
- case INTERPRETER_DATA_TYPE:
- case TEMPLATE_OBJECT_DESCRIPTION_TYPE:
- case TUPLE2_TYPE:
- case BREAK_POINT_TYPE:
- case BREAK_POINT_INFO_TYPE:
- case WASM_VALUE_TYPE:
- case CACHED_TEMPLATE_OBJECT_TYPE:
- case ENUM_CACHE_TYPE:
- case WASM_CAPI_FUNCTION_DATA_TYPE:
- case WASM_INDIRECT_FUNCTION_TABLE_TYPE:
- case WASM_EXCEPTION_TAG_TYPE:
- case WASM_EXPORTED_FUNCTION_DATA_TYPE:
- case WASM_JS_FUNCTION_DATA_TYPE:
- case LOAD_HANDLER_TYPE:
- case STORE_HANDLER_TYPE:
- case ASYNC_GENERATOR_REQUEST_TYPE:
- case CODE_DATA_CONTAINER_TYPE:
- case CALLBACK_TASK_TYPE:
- case CALLABLE_TASK_TYPE:
- case PROMISE_FULFILL_REACTION_JOB_TASK_TYPE:
- case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
- case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
-#define MAKE_TORQUE_CLASS_TYPE(INSTANCE_TYPE, Name, name) case INSTANCE_TYPE:
- TORQUE_DEFINED_INSTANCE_TYPE_LIST(MAKE_TORQUE_CLASS_TYPE)
-#undef MAKE_TORQUE_CLASS_TYPE
+ default:
UNREACHABLE();
}
UNREACHABLE();
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 302e1212ee..b137e6711d 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -772,7 +772,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// We don't check the input for Type::Function because this_function can
// be context-allocated.
CheckValueInputIs(node, 0, Type::Any());
- CheckTypeIs(node, Type::Callable());
+ CheckTypeIs(node, Type::NonInternal());
break;
case IrOpcode::kJSHasContextExtension:
diff --git a/deps/v8/src/compiler/verifier.h b/deps/v8/src/compiler/verifier.h
index 308b44060a..0be0102cc0 100644
--- a/deps/v8/src/compiler/verifier.h
+++ b/deps/v8/src/compiler/verifier.h
@@ -25,6 +25,9 @@ class Verifier {
enum CheckInputs { kValuesOnly, kAll };
enum CodeType { kDefault, kWasm };
+ Verifier(const Verifier&) = delete;
+ Verifier& operator=(const Verifier&) = delete;
+
static void Run(Graph* graph, Typing typing = TYPED,
CheckInputs check_inputs = kAll,
CodeType code_type = kDefault);
@@ -53,7 +56,6 @@ class Verifier {
private:
class Visitor;
- DISALLOW_COPY_AND_ASSIGN(Verifier);
};
// Verifies properties of a schedule, such as dominance, phi placement, etc.
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 91dde088f6..d6b7113b27 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -16,6 +16,7 @@
#include "src/codegen/code-factory.h"
#include "src/codegen/compiler.h"
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/instruction-selector.h"
@@ -297,9 +298,9 @@ Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmRefFunc, RelocInfo::WASM_STUB_CALL);
- return SetEffectControl(
- graph()->NewNode(mcgraph()->common()->Call(call_descriptor), call_target,
- Uint32Constant(function_index), effect(), control()));
+ return SetEffectControl(graph()->NewNode(
+ mcgraph()->common()->Call(call_descriptor), call_target,
+ mcgraph()->Uint32Constant(function_index), effect(), control()));
}
Node* WasmGraphBuilder::RefAsNonNull(Node* arg,
@@ -321,10 +322,6 @@ Node* WasmGraphBuilder::BuildLoadIsolateRoot() {
return LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
}
-Node* WasmGraphBuilder::Uint32Constant(uint32_t value) {
- return mcgraph()->Uint32Constant(value);
-}
-
Node* WasmGraphBuilder::Int32Constant(int32_t value) {
return mcgraph()->Int32Constant(value);
}
@@ -333,10 +330,6 @@ Node* WasmGraphBuilder::Int64Constant(int64_t value) {
return mcgraph()->Int64Constant(value);
}
-Node* WasmGraphBuilder::IntPtrConstant(intptr_t value) {
- return mcgraph()->IntPtrConstant(value);
-}
-
void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
DCHECK_NOT_NULL(env_); // Wrappers don't get stack checks.
if (!FLAG_wasm_stack_checks || !env_->runtime_exception_support) {
@@ -1062,7 +1055,7 @@ Node* WasmGraphBuilder::TrapIfEq32(wasm::TrapReason reason, Node* node,
int32_t val,
wasm::WasmCodePosition position) {
Int32Matcher m(node);
- if (m.HasValue() && !m.Is(val)) return graph()->start();
+ if (m.HasResolvedValue() && !m.Is(val)) return graph()->start();
if (val == 0) {
return TrapIfFalse(reason, node, position);
} else {
@@ -1084,7 +1077,7 @@ Node* WasmGraphBuilder::TrapIfEq64(wasm::TrapReason reason, Node* node,
int64_t val,
wasm::WasmCodePosition position) {
Int64Matcher m(node);
- if (m.HasValue() && !m.Is(val)) return graph()->start();
+ if (m.HasResolvedValue() && !m.Is(val)) return graph()->start();
return TrapIfTrue(reason,
graph()->NewNode(mcgraph()->machine()->Word64Equal(), node,
mcgraph()->Int64Constant(val)),
@@ -1144,9 +1137,10 @@ Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
if (!mcgraph()->machine()->Word32ShiftIsSafe()) {
// Shifts by constants are so common we pattern-match them here.
Int32Matcher match(node);
- if (match.HasValue()) {
- int32_t masked = (match.Value() & kMask32);
- if (match.Value() != masked) node = mcgraph()->Int32Constant(masked);
+ if (match.HasResolvedValue()) {
+ int32_t masked = (match.ResolvedValue() & kMask32);
+ if (match.ResolvedValue() != masked)
+ node = mcgraph()->Int32Constant(masked);
} else {
node = graph()->NewNode(mcgraph()->machine()->Word32And(), node,
mcgraph()->Int32Constant(kMask32));
@@ -1160,9 +1154,10 @@ Node* WasmGraphBuilder::MaskShiftCount64(Node* node) {
if (!mcgraph()->machine()->Word32ShiftIsSafe()) {
// Shifts by constants are so common we pattern-match them here.
Int64Matcher match(node);
- if (match.HasValue()) {
- int64_t masked = (match.Value() & kMask64);
- if (match.Value() != masked) node = mcgraph()->Int64Constant(masked);
+ if (match.HasResolvedValue()) {
+ int64_t masked = (match.ResolvedValue() & kMask64);
+ if (match.ResolvedValue() != masked)
+ node = mcgraph()->Int64Constant(masked);
} else {
node = graph()->NewNode(mcgraph()->machine()->Word64And(), node,
mcgraph()->Int64Constant(kMask64));
@@ -2084,7 +2079,7 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
Node* create_parameters[] = {
LoadExceptionTagFromTable(exception_index),
- BuildChangeUint31ToSmi(Uint32Constant(encoded_size))};
+ BuildChangeUint31ToSmi(mcgraph()->Uint32Constant(encoded_size))};
Node* except_obj =
BuildCallToRuntime(Runtime::kWasmThrowCreate, create_parameters,
arraysize(create_parameters));
@@ -2357,10 +2352,10 @@ Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
MachineOperatorBuilder* m = mcgraph()->machine();
Int32Matcher mr(right);
- if (mr.HasValue()) {
- if (mr.Value() == 0) {
+ if (mr.HasResolvedValue()) {
+ if (mr.ResolvedValue() == 0) {
return mcgraph()->Int32Constant(0);
- } else if (mr.Value() == -1) {
+ } else if (mr.ResolvedValue() == -1) {
// The result is the negation of the left input.
return graph()->NewNode(m->Int32Sub(), mcgraph()->Int32Constant(0), left);
}
@@ -2400,8 +2395,8 @@ Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
Node* const zero = mcgraph()->Int32Constant(0);
Int32Matcher mr(right);
- if (mr.HasValue()) {
- if (mr.Value() == 0 || mr.Value() == -1) {
+ if (mr.HasResolvedValue()) {
+ if (mr.ResolvedValue() == 0 || mr.ResolvedValue() == -1) {
return zero;
}
return graph()->NewNode(m->Int32Mod(), left, right, control());
@@ -2672,7 +2667,7 @@ Node* WasmGraphBuilder::BuildWasmCall(const wasm::FunctionSig* sig,
wasm::WasmCodePosition position,
Node* instance_node,
UseRetpoline use_retpoline) {
- auto call_descriptor =
+ CallDescriptor* call_descriptor =
GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline);
const Operator* op = mcgraph()->common()->Call(call_descriptor);
Node* call = BuildCallNode(sig, args, position, instance_node, op);
@@ -2699,7 +2694,7 @@ Node* WasmGraphBuilder::BuildWasmReturnCall(const wasm::FunctionSig* sig,
wasm::WasmCodePosition position,
Node* instance_node,
UseRetpoline use_retpoline) {
- auto call_descriptor =
+ CallDescriptor* call_descriptor =
GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline);
const Operator* op = mcgraph()->common()->TailCall(call_descriptor);
Node* call = BuildCallNode(sig, args, position, instance_node, op);
@@ -2878,7 +2873,7 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
// Bounds check against the table size.
Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, ift_size);
- TrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
+ TrapIfFalse(wasm::kTrapTableOutOfBounds, in_bounds, position);
// Mask the key to prevent SSCA.
if (untrusted_code_mitigations_) {
@@ -2896,20 +2891,27 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
Node* int32_scaled_key = Uint32ToUintptr(
graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2)));
+ Node* loaded_sig = SetEffect(
+ graph()->NewNode(machine->Load(MachineType::Int32()), ift_sig_ids,
+ int32_scaled_key, effect(), control()));
// Check that the dynamic type of the function is a subtype of its static
// (table) type. Currently, the only subtyping between function types is
// $t <: funcref for all $t: function_type.
// TODO(7748): Expand this with function subtyping.
- if (env_->module->tables[table_index].type == wasm::kWasmFuncRef) {
- int32_t expected_sig_id = env_->module->signature_ids[sig_index];
-
- Node* loaded_sig = SetEffect(
- graph()->NewNode(machine->Load(MachineType::Int32()), ift_sig_ids,
- int32_scaled_key, effect(), control()));
- Node* sig_match = graph()->NewNode(machine->WordEqual(), loaded_sig,
+ const bool needs_typechecking =
+ env_->module->tables[table_index].type == wasm::kWasmFuncRef;
+ if (needs_typechecking) {
+ int32_t expected_sig_id = env_->module->canonicalized_type_ids[sig_index];
+ Node* sig_match = graph()->NewNode(machine->Word32Equal(), loaded_sig,
Int32Constant(expected_sig_id));
-
TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
+ } else {
+ // We still have to check that the entry is initialized.
+ // TODO(9495): Skip this check for non-nullable tables when they are
+ // allowed.
+ Node* function_is_null =
+ graph()->NewNode(machine->Word32Equal(), loaded_sig, Int32Constant(-1));
+ TrapIfTrue(wasm::kTrapNullDereference, function_is_null, position);
}
Node* tagged_scaled_key;
@@ -2953,10 +2955,9 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
}
}
-Node* WasmGraphBuilder::BuildLoadFunctionDataFromExportedFunction(
- Node* closure) {
+Node* WasmGraphBuilder::BuildLoadFunctionDataFromJSFunction(Node* js_function) {
Node* shared = gasm_->Load(
- MachineType::AnyTagged(), closure,
+ MachineType::AnyTagged(), js_function,
wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction());
return gasm_->Load(MachineType::AnyTagged(), shared,
SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag);
@@ -3001,7 +3002,7 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
const wasm::FunctionSig* sig = env_->module->signature(sig_index);
- Node* function_data = BuildLoadFunctionDataFromExportedFunction(args[0]);
+ Node* function_data = BuildLoadFunctionDataFromJSFunction(args[0]);
Node* is_js_function =
HasInstanceType(gasm_.get(), function_data, WASM_JS_FUNCTION_DATA_TYPE);
@@ -3078,13 +3079,30 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
}
{
- // Call to a WasmJSFunction.
- // The call target is the wasm-to-js wrapper code.
+ // Call to a WasmJSFunction. The call target is
+ // function_data->wasm_to_js_wrapper_code()->instruction_start().
+ // The instance_node is the pair
+ // (current WasmInstanceObject, function_data->callable()).
gasm_->Bind(&js_label);
- // TODO(9495): Implement when the interaction with the type reflection
- // proposal is clear.
- TrapIfTrue(wasm::kTrapWasmJSFunction, gasm_->Int32Constant(1), position);
- gasm_->Goto(&end_label, args[0], RefNull() /* Dummy value */);
+
+ Node* wrapper_code =
+ gasm_->Load(MachineType::TaggedPointer(), function_data,
+ wasm::ObjectAccess::ToTagged(
+ WasmJSFunctionData::kWasmToJsWrapperCodeOffset));
+ Node* call_target = gasm_->IntAdd(
+ wrapper_code,
+ gasm_->IntPtrConstant(wasm::ObjectAccess::ToTagged(Code::kHeaderSize)));
+
+ Node* callable = gasm_->Load(
+ MachineType::TaggedPointer(), function_data,
+ wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset));
+ // TODO(manoskouk): Find an elegant way to avoid allocating this pair for
+ // every call.
+ Node* function_instance_node = CALL_BUILTIN(
+ WasmAllocatePair, instance_node_.get(), callable,
+ LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+
+ gasm_->Goto(&end_label, call_target, function_instance_node);
}
gasm_->Bind(&end_label);
@@ -3158,9 +3176,9 @@ Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
// Implement Rol by Ror since TurboFan does not have Rol opcode.
// TODO(weiliang): support Word32Rol opcode in TurboFan.
Int32Matcher m(right);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
return Binop(wasm::kExprI32Ror, left,
- mcgraph()->Int32Constant(32 - (m.Value() & 0x1F)));
+ mcgraph()->Int32Constant(32 - (m.ResolvedValue() & 0x1F)));
} else {
return Binop(wasm::kExprI32Ror, left,
Binop(wasm::kExprI32Sub, mcgraph()->Int32Constant(32), right));
@@ -3172,8 +3190,8 @@ Node* WasmGraphBuilder::BuildI64Rol(Node* left, Node* right) {
// TODO(weiliang): support Word64Rol opcode in TurboFan.
Int64Matcher m(right);
Node* inv_right =
- m.HasValue()
- ? mcgraph()->Int64Constant(64 - (m.Value() & 0x3F))
+ m.HasResolvedValue()
+ ? mcgraph()->Int64Constant(64 - (m.ResolvedValue() & 0x3F))
: Binop(wasm::kExprI64Sub, mcgraph()->Int64Constant(64), right);
return Binop(wasm::kExprI64Ror, left, inv_right);
}
@@ -3183,59 +3201,44 @@ Node* WasmGraphBuilder::Invert(Node* node) {
}
Node* WasmGraphBuilder::BuildTruncateIntPtrToInt32(Node* value) {
- if (mcgraph()->machine()->Is64()) {
- value =
- graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), value);
- }
- return value;
+ return mcgraph()->machine()->Is64() ? gasm_->TruncateInt64ToInt32(value)
+ : value;
}
Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) {
- if (mcgraph()->machine()->Is64()) {
- value = graph()->NewNode(mcgraph()->machine()->ChangeInt32ToInt64(), value);
- }
- return value;
+ return mcgraph()->machine()->Is64() ? gasm_->ChangeInt32ToInt64(value)
+ : value;
}
Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
// With pointer compression, only the lower 32 bits are used.
- if (COMPRESS_POINTERS_BOOL) {
- return graph()->NewNode(mcgraph()->machine()->Word32Shl(), value,
- BuildSmiShiftBitsConstant32());
- }
- value = BuildChangeInt32ToIntPtr(value);
- return graph()->NewNode(mcgraph()->machine()->WordShl(), value,
- BuildSmiShiftBitsConstant());
+ return COMPRESS_POINTERS_BOOL
+ ? gasm_->Word32Shl(value, BuildSmiShiftBitsConstant32())
+ : gasm_->WordShl(BuildChangeInt32ToIntPtr(value),
+ BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildChangeUint31ToSmi(Node* value) {
- if (COMPRESS_POINTERS_BOOL) {
- return graph()->NewNode(mcgraph()->machine()->Word32Shl(), value,
- BuildSmiShiftBitsConstant32());
- }
- return graph()->NewNode(mcgraph()->machine()->WordShl(),
- Uint32ToUintptr(value), BuildSmiShiftBitsConstant());
+ return COMPRESS_POINTERS_BOOL
+ ? gasm_->Word32Shl(value, BuildSmiShiftBitsConstant32())
+ : graph()->NewNode(mcgraph()->machine()->WordShl(),
+ Uint32ToUintptr(value),
+ BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() {
- return mcgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+ return gasm_->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
Node* WasmGraphBuilder::BuildSmiShiftBitsConstant32() {
- return mcgraph()->Int32Constant(kSmiShiftSize + kSmiTagSize);
+ return gasm_->Int32Constant(kSmiShiftSize + kSmiTagSize);
}
Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
- if (COMPRESS_POINTERS_BOOL) {
- value =
- graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), value);
- value = graph()->NewNode(mcgraph()->machine()->Word32Sar(), value,
- BuildSmiShiftBitsConstant32());
- } else {
- value = BuildChangeSmiToIntPtr(value);
- value = BuildTruncateIntPtrToInt32(value);
- }
- return value;
+ return COMPRESS_POINTERS_BOOL
+ ? gasm_->Word32Sar(gasm_->TruncateInt64ToInt32(value),
+ BuildSmiShiftBitsConstant32())
+ : BuildTruncateIntPtrToInt32(BuildChangeSmiToIntPtr(value));
}
Node* WasmGraphBuilder::BuildChangeSmiToIntPtr(Node* value) {
@@ -3250,7 +3253,7 @@ Node* WasmGraphBuilder::BuildChangeSmiToIntPtr(Node* value) {
Node* WasmGraphBuilder::BuildConvertUint32ToSmiWithSaturation(Node* value,
uint32_t maxval) {
DCHECK(Smi::IsValid(maxval));
- Node* max = Uint32Constant(maxval);
+ Node* max = mcgraph()->Uint32Constant(maxval);
Node* check = graph()->NewNode(mcgraph()->machine()->Uint32LessThanOrEqual(),
value, max);
Node* valsmi = BuildChangeUint31ToSmi(value);
@@ -3469,13 +3472,12 @@ void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableExternRefGlobal(
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)));
}
-Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
+Node* WasmGraphBuilder::MemBuffer(uintptr_t offset) {
DCHECK_NOT_NULL(instance_cache_);
Node* mem_start = instance_cache_->mem_start;
DCHECK_NOT_NULL(mem_start);
if (offset == 0) return mem_start;
- return graph()->NewNode(mcgraph()->machine()->IntAdd(), mem_start,
- mcgraph()->IntPtrConstant(offset));
+ return gasm_->IntAdd(mem_start, gasm_->UintPtrConstant(offset));
}
Node* WasmGraphBuilder::CurrentMemoryPages() {
@@ -3612,7 +3614,7 @@ Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index,
return SetEffectControl(graph()->NewNode(
mcgraph()->common()->Call(call_descriptor), call_target,
- IntPtrConstant(table_index), index, effect(), control()));
+ mcgraph()->IntPtrConstant(table_index), index, effect(), control()));
}
Node* WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
@@ -3624,13 +3626,12 @@ Node* WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmTableSet, RelocInfo::WASM_STUB_CALL);
- return SetEffectControl(graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), call_target,
- IntPtrConstant(table_index), index, val, effect(), control()));
+ return gasm_->Call(call_descriptor, call_target,
+ gasm_->IntPtrConstant(table_index), index, val);
}
Node* WasmGraphBuilder::CheckBoundsAndAlignment(
- uint8_t access_size, Node* index, uint32_t offset,
+ int8_t access_size, Node* index, uint64_t offset,
wasm::WasmCodePosition position) {
// Atomic operations need bounds checks until the backend can emit protected
// loads.
@@ -3639,11 +3640,13 @@ Node* WasmGraphBuilder::CheckBoundsAndAlignment(
const uintptr_t align_mask = access_size - 1;
+ // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
+ uintptr_t capped_offset = static_cast<uintptr_t>(offset);
// Don't emit an alignment check if the index is a constant.
// TODO(wasm): a constant match is also done above in {BoundsCheckMem}.
UintPtrMatcher match(index);
- if (match.HasValue()) {
- uintptr_t effective_offset = match.Value() + offset;
+ if (match.HasResolvedValue()) {
+ uintptr_t effective_offset = match.ResolvedValue() + capped_offset;
if ((effective_offset & align_mask) != 0) {
// statically known to be unaligned; trap.
TrapIfEq32(wasm::kTrapUnalignedAccess, Int32Constant(0), 0, position);
@@ -3654,15 +3657,12 @@ Node* WasmGraphBuilder::CheckBoundsAndAlignment(
// Unlike regular memory accesses, atomic memory accesses should trap if
// the effective offset is misaligned.
// TODO(wasm): this addition is redundant with one inserted by {MemBuffer}.
- Node* effective_offset = graph()->NewNode(mcgraph()->machine()->IntAdd(),
- MemBuffer(offset), index);
+ Node* effective_offset = gasm_->IntAdd(MemBuffer(capped_offset), index);
- Node* cond = graph()->NewNode(mcgraph()->machine()->WordAnd(),
- effective_offset, IntPtrConstant(align_mask));
+ Node* cond =
+ gasm_->WordAnd(effective_offset, gasm_->IntPtrConstant(align_mask));
TrapIfFalse(wasm::kTrapUnalignedAccess,
- graph()->NewNode(mcgraph()->machine()->Word32Equal(), cond,
- mcgraph()->Int32Constant(0)),
- position);
+ gasm_->Word32Equal(cond, gasm_->Int32Constant(0)), position);
return index;
}
@@ -3688,7 +3688,7 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
env_->max_memory_size)) {
// The access will be out of bounds, even for the largest memory.
TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
- return mcgraph()->UintPtrConstant(0);
+ return gasm_->UintPtrConstant(0);
}
uintptr_t end_offset = offset + access_size - 1u;
Node* end_offset_node = mcgraph_->UintPtrConstant(end_offset);
@@ -3702,19 +3702,18 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
// - computing {effective_size} as {mem_size - end_offset} and
// - checking that {index < effective_size}.
- auto m = mcgraph()->machine();
Node* mem_size = instance_cache_->mem_size;
if (end_offset >= env_->min_memory_size) {
// The end offset is larger than the smallest memory.
// Dynamically check the end offset against the dynamic memory size.
- Node* cond = graph()->NewNode(m->UintLessThan(), end_offset_node, mem_size);
+ Node* cond = gasm_->UintLessThan(end_offset_node, mem_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
} else {
// The end offset is smaller than the smallest memory, so only one check is
// required. Check to see if the index is also a constant.
UintPtrMatcher match(index);
- if (match.HasValue()) {
- uintptr_t index_val = match.Value();
+ if (match.HasResolvedValue()) {
+ uintptr_t index_val = match.ResolvedValue();
if (index_val < env_->min_memory_size - end_offset) {
// The input index is a constant and everything is statically within
// bounds of the smallest possible memory.
@@ -3724,18 +3723,17 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
}
// This produces a positive number, since {end_offset < min_size <= mem_size}.
- Node* effective_size =
- graph()->NewNode(m->IntSub(), mem_size, end_offset_node);
+ Node* effective_size = gasm_->IntSub(mem_size, end_offset_node);
// Introduce the actual bounds check.
- Node* cond = graph()->NewNode(m->UintLessThan(), index, effective_size);
+ Node* cond = gasm_->UintLessThan(index, effective_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
if (untrusted_code_mitigations_) {
// In the fallthrough case, condition the index with the memory mask.
Node* mem_mask = instance_cache_->mem_mask;
DCHECK_NOT_NULL(mem_mask);
- index = graph()->NewNode(m->WordAnd(), index, mem_mask);
+ index = gasm_->WordAnd(index, mem_mask);
}
return index;
}
@@ -3828,20 +3826,20 @@ Node* WasmGraphBuilder::TraceFunctionExit(Vector<Node*> vals,
Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
MachineRepresentation rep,
- Node* index, uint32_t offset,
+ Node* index, uintptr_t offset,
wasm::WasmCodePosition position) {
int kAlign = 4; // Ensure that the LSB is 0, such that this looks like a Smi.
TNode<RawPtrT> info =
gasm_->StackSlot(sizeof(wasm::MemoryTracingInfo), kAlign);
- Node* address = gasm_->Int32Add(Int32Constant(offset), index);
- auto store = [&](int offset, MachineRepresentation rep, Node* data) {
+ Node* effective_offset = gasm_->IntAdd(gasm_->UintPtrConstant(offset), index);
+ auto store = [&](int field_offset, MachineRepresentation rep, Node* data) {
gasm_->Store(StoreRepresentation(rep, kNoWriteBarrier), info,
- gasm_->Int32Constant(offset), data);
+ gasm_->Int32Constant(field_offset), data);
};
- // Store address, is_store, and mem_rep.
- store(offsetof(wasm::MemoryTracingInfo, address),
- MachineRepresentation::kWord32, address);
+ // Store effective_offset, is_store, and mem_rep.
+ store(offsetof(wasm::MemoryTracingInfo, offset),
+ MachineType::PointerRepresentation(), effective_offset);
store(offsetof(wasm::MemoryTracingInfo, is_store),
MachineRepresentation::kWord8,
mcgraph()->Int32Constant(is_store ? 1 : 0));
@@ -3862,55 +3860,56 @@ LoadTransformation GetLoadTransformation(
switch (transform) {
case wasm::LoadTransformationKind::kSplat: {
if (memtype == MachineType::Int8()) {
- return LoadTransformation::kS8x16LoadSplat;
+ return LoadTransformation::kS128Load8Splat;
} else if (memtype == MachineType::Int16()) {
- return LoadTransformation::kS16x8LoadSplat;
+ return LoadTransformation::kS128Load16Splat;
} else if (memtype == MachineType::Int32()) {
- return LoadTransformation::kS32x4LoadSplat;
+ return LoadTransformation::kS128Load32Splat;
} else if (memtype == MachineType::Int64()) {
- return LoadTransformation::kS64x2LoadSplat;
+ return LoadTransformation::kS128Load64Splat;
}
break;
}
case wasm::LoadTransformationKind::kExtend: {
if (memtype == MachineType::Int8()) {
- return LoadTransformation::kI16x8Load8x8S;
+ return LoadTransformation::kS128Load8x8S;
} else if (memtype == MachineType::Uint8()) {
- return LoadTransformation::kI16x8Load8x8U;
+ return LoadTransformation::kS128Load8x8U;
} else if (memtype == MachineType::Int16()) {
- return LoadTransformation::kI32x4Load16x4S;
+ return LoadTransformation::kS128Load16x4S;
} else if (memtype == MachineType::Uint16()) {
- return LoadTransformation::kI32x4Load16x4U;
+ return LoadTransformation::kS128Load16x4U;
} else if (memtype == MachineType::Int32()) {
- return LoadTransformation::kI64x2Load32x2S;
+ return LoadTransformation::kS128Load32x2S;
} else if (memtype == MachineType::Uint32()) {
- return LoadTransformation::kI64x2Load32x2U;
+ return LoadTransformation::kS128Load32x2U;
}
break;
}
case wasm::LoadTransformationKind::kZeroExtend: {
if (memtype == MachineType::Int32()) {
- return LoadTransformation::kS128LoadMem32Zero;
+ return LoadTransformation::kS128Load32Zero;
} else if (memtype == MachineType::Int64()) {
- return LoadTransformation::kS128LoadMem64Zero;
+ return LoadTransformation::kS128Load64Zero;
}
+ break;
}
}
UNREACHABLE();
}
-LoadKind GetLoadKind(MachineGraph* mcgraph, MachineType memtype,
- bool use_trap_handler) {
+MemoryAccessKind GetMemoryAccessKind(MachineGraph* mcgraph, MachineType memtype,
+ bool use_trap_handler) {
if (memtype.representation() == MachineRepresentation::kWord8 ||
mcgraph->machine()->UnalignedLoadSupported(memtype.representation())) {
if (use_trap_handler) {
- return LoadKind::kProtected;
+ return MemoryAccessKind::kProtected;
}
- return LoadKind::kNormal;
+ return MemoryAccessKind::kNormal;
}
// TODO(eholk): Support unaligned loads with trap handlers.
DCHECK(!use_trap_handler);
- return LoadKind::kUnaligned;
+ return MemoryAccessKind::kUnaligned;
}
} // namespace
@@ -3920,7 +3919,7 @@ LoadKind GetLoadKind(MachineGraph* mcgraph, MachineType memtype,
#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
Node* WasmGraphBuilder::LoadTransformBigEndian(
wasm::ValueType type, MachineType memtype,
- wasm::LoadTransformationKind transform, Node* index, uint32_t offset,
+ wasm::LoadTransformationKind transform, Node* index, uint64_t offset,
uint32_t alignment, wasm::WasmCodePosition position) {
#define LOAD_EXTEND(num_lanes, bytes_per_load, replace_lane) \
result = graph()->NewNode(mcgraph()->machine()->S128Zero()); \
@@ -3946,41 +3945,55 @@ Node* WasmGraphBuilder::LoadTransformBigEndian(
LoadTransformation transformation = GetLoadTransformation(memtype, transform);
switch (transformation) {
- case LoadTransformation::kS8x16LoadSplat: {
+ case LoadTransformation::kS128Load8Splat: {
result = LoadMem(type, memtype, index, offset, alignment, position);
result = graph()->NewNode(mcgraph()->machine()->I8x16Splat(), result);
break;
}
- case LoadTransformation::kI16x8Load8x8S:
- case LoadTransformation::kI16x8Load8x8U: {
+ case LoadTransformation::kS128Load8x8S:
+ case LoadTransformation::kS128Load8x8U: {
LOAD_EXTEND(8, 1, I16x8ReplaceLane)
break;
}
- case LoadTransformation::kS16x8LoadSplat: {
+ case LoadTransformation::kS128Load16Splat: {
result = LoadMem(type, memtype, index, offset, alignment, position);
result = graph()->NewNode(mcgraph()->machine()->I16x8Splat(), result);
break;
}
- case LoadTransformation::kI32x4Load16x4S:
- case LoadTransformation::kI32x4Load16x4U: {
+ case LoadTransformation::kS128Load16x4S:
+ case LoadTransformation::kS128Load16x4U: {
LOAD_EXTEND(4, 2, I32x4ReplaceLane)
break;
}
- case LoadTransformation::kS32x4LoadSplat: {
+ case LoadTransformation::kS128Load32Splat: {
result = LoadMem(type, memtype, index, offset, alignment, position);
result = graph()->NewNode(mcgraph()->machine()->I32x4Splat(), result);
break;
}
- case LoadTransformation::kI64x2Load32x2S:
- case LoadTransformation::kI64x2Load32x2U: {
+ case LoadTransformation::kS128Load32x2S:
+ case LoadTransformation::kS128Load32x2U: {
LOAD_EXTEND(2, 4, I64x2ReplaceLane)
break;
}
- case LoadTransformation::kS64x2LoadSplat: {
+ case LoadTransformation::kS128Load64Splat: {
result = LoadMem(type, memtype, index, offset, alignment, position);
result = graph()->NewNode(mcgraph()->machine()->I64x2Splat(), result);
break;
}
+ case LoadTransformation::kS128Load32Zero: {
+ result = graph()->NewNode(mcgraph()->machine()->S128Zero());
+ result = graph()->NewNode(
+ mcgraph()->machine()->I32x4ReplaceLane(0), result,
+ LoadMem(type, memtype, index, offset, alignment, position));
+ break;
+ }
+ case LoadTransformation::kS128Load64Zero: {
+ result = graph()->NewNode(mcgraph()->machine()->S128Zero());
+ result = graph()->NewNode(
+ mcgraph()->machine()->I64x2ReplaceLane(0), result,
+ LoadMem(type, memtype, index, offset, alignment, position));
+ break;
+ }
default:
UNREACHABLE();
}
@@ -3990,14 +4003,44 @@ Node* WasmGraphBuilder::LoadTransformBigEndian(
}
#endif
+Node* WasmGraphBuilder::LoadLane(MachineType memtype, Node* value, Node* index,
+ uint32_t offset, uint8_t laneidx,
+ wasm::WasmCodePosition position) {
+ has_simd_ = true;
+ Node* load;
+ uint8_t access_size = memtype.MemSize();
+ index =
+ BoundsCheckMem(access_size, index, offset, position, kCanOmitBoundsCheck);
+
+ MemoryAccessKind load_kind =
+ GetMemoryAccessKind(mcgraph(), memtype, use_trap_handler());
+
+ load = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->LoadLane(load_kind, memtype, laneidx),
+ MemBuffer(offset), index, value, effect(), control()));
+
+ if (load_kind == MemoryAccessKind::kProtected) {
+ SetSourcePosition(load, position);
+ }
+
+ if (FLAG_trace_wasm_memory) {
+ TraceMemoryOperation(false, memtype.representation(), index, offset,
+ position);
+ }
+
+ return load;
+}
+
Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
wasm::LoadTransformationKind transform,
- Node* index, uint32_t offset,
+ Node* index, uint64_t offset,
uint32_t alignment,
wasm::WasmCodePosition position) {
has_simd_ = true;
Node* load;
+ // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
+ uintptr_t capped_offset = static_cast<uintptr_t>(offset);
#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
// LoadTransform cannot efficiently be executed on BE machines as a
@@ -4005,7 +4048,7 @@ Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
// therefore we divide them into separate "load" and "operation" nodes.
load = LoadTransformBigEndian(type, memtype, transform, index, offset,
alignment, position);
- USE(GetLoadKind);
+ USE(GetMemoryAccessKind);
#else
// Wasm semantics throw on OOB. Introduce explicit bounds check and
// conditioning when not using the trap handler.
@@ -4018,26 +4061,27 @@ Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
BoundsCheckMem(access_size, index, offset, position, kCanOmitBoundsCheck);
LoadTransformation transformation = GetLoadTransformation(memtype, transform);
- LoadKind load_kind = GetLoadKind(mcgraph(), memtype, use_trap_handler());
+ MemoryAccessKind load_kind =
+ GetMemoryAccessKind(mcgraph(), memtype, use_trap_handler());
load = SetEffect(graph()->NewNode(
mcgraph()->machine()->LoadTransform(load_kind, transformation),
- MemBuffer(offset), index, effect(), control()));
+ MemBuffer(capped_offset), index, effect(), control()));
- if (load_kind == LoadKind::kProtected) {
+ if (load_kind == MemoryAccessKind::kProtected) {
SetSourcePosition(load, position);
}
#endif
if (FLAG_trace_wasm_memory) {
- TraceMemoryOperation(false, memtype.representation(), index, offset,
+ TraceMemoryOperation(false, memtype.representation(), index, capped_offset,
position);
}
return load;
}
Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
- Node* index, uint32_t offset,
+ Node* index, uint64_t offset,
uint32_t alignment,
wasm::WasmCodePosition position) {
Node* load;
@@ -4051,25 +4095,22 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
index = BoundsCheckMem(memtype.MemSize(), index, offset, position,
kCanOmitBoundsCheck);
+ // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
+ uintptr_t capped_offset = static_cast<uintptr_t>(offset);
if (memtype.representation() == MachineRepresentation::kWord8 ||
mcgraph()->machine()->UnalignedLoadSupported(memtype.representation())) {
if (use_trap_handler()) {
- load = graph()->NewNode(mcgraph()->machine()->ProtectedLoad(memtype),
- MemBuffer(offset), index, effect(), control());
+ load = gasm_->ProtectedLoad(memtype, MemBuffer(capped_offset), index);
SetSourcePosition(load, position);
} else {
- load = graph()->NewNode(mcgraph()->machine()->Load(memtype),
- MemBuffer(offset), index, effect(), control());
+ load = gasm_->Load(memtype, MemBuffer(capped_offset), index);
}
} else {
// TODO(eholk): Support unaligned loads with trap handlers.
DCHECK(!use_trap_handler());
- load = graph()->NewNode(mcgraph()->machine()->UnalignedLoad(memtype),
- MemBuffer(offset), index, effect(), control());
+ load = gasm_->LoadUnaligned(memtype, MemBuffer(capped_offset), index);
}
- SetEffect(load);
-
#if defined(V8_TARGET_BIG_ENDIAN)
load = BuildChangeEndiannessLoad(load, memtype, type);
#endif
@@ -4077,26 +4118,53 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
if (type == wasm::kWasmI64 &&
ElementSizeInBytes(memtype.representation()) < 8) {
// TODO(titzer): TF zeroes the upper bits of 64-bit loads for subword sizes.
- if (memtype.IsSigned()) {
- // sign extend
- load = graph()->NewNode(mcgraph()->machine()->ChangeInt32ToInt64(), load);
- } else {
- // zero extend
- load =
- graph()->NewNode(mcgraph()->machine()->ChangeUint32ToUint64(), load);
- }
+ load = memtype.IsSigned()
+ ? gasm_->ChangeInt32ToInt64(load) // sign extend
+ : gasm_->ChangeUint32ToUint64(load); // zero extend
}
if (FLAG_trace_wasm_memory) {
- TraceMemoryOperation(false, memtype.representation(), index, offset,
+ TraceMemoryOperation(false, memtype.representation(), index, capped_offset,
position);
}
return load;
}
+Node* WasmGraphBuilder::StoreLane(MachineRepresentation mem_rep, Node* index,
+ uint32_t offset, uint32_t alignment,
+ Node* val, uint8_t laneidx,
+ wasm::WasmCodePosition position,
+ wasm::ValueType type) {
+ Node* store;
+ has_simd_ = true;
+ index = BoundsCheckMem(i::ElementSizeInBytes(mem_rep), index, offset,
+ position, kCanOmitBoundsCheck);
+
+ MachineType memtype = MachineType(mem_rep, MachineSemantic::kNone);
+ MemoryAccessKind load_kind =
+ GetMemoryAccessKind(mcgraph(), memtype, use_trap_handler());
+
+ // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
+ uintptr_t capped_offset = static_cast<uintptr_t>(offset);
+
+ store = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->StoreLane(load_kind, mem_rep, laneidx),
+ MemBuffer(capped_offset), index, val, effect(), control()));
+
+ if (load_kind == MemoryAccessKind::kProtected) {
+ SetSourcePosition(store, position);
+ }
+
+ if (FLAG_trace_wasm_memory) {
+ TraceMemoryOperation(true, mem_rep, index, capped_offset, position);
+ }
+
+ return store;
+}
+
Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
- uint32_t offset, uint32_t alignment, Node* val,
+ uint64_t offset, uint32_t alignment, Node* val,
wasm::WasmCodePosition position,
wasm::ValueType type) {
Node* store;
@@ -4112,32 +4180,27 @@ Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
val = BuildChangeEndiannessStore(val, mem_rep, type);
#endif
+ // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
+ uintptr_t capped_offset = static_cast<uintptr_t>(offset);
if (mem_rep == MachineRepresentation::kWord8 ||
mcgraph()->machine()->UnalignedStoreSupported(mem_rep)) {
if (use_trap_handler()) {
store =
- graph()->NewNode(mcgraph()->machine()->ProtectedStore(mem_rep),
- MemBuffer(offset), index, val, effect(), control());
+ gasm_->ProtectedStore(mem_rep, MemBuffer(capped_offset), index, val);
SetSourcePosition(store, position);
} else {
- StoreRepresentation rep(mem_rep, kNoWriteBarrier);
- store =
- graph()->NewNode(mcgraph()->machine()->Store(rep), MemBuffer(offset),
- index, val, effect(), control());
+ store = gasm_->Store(StoreRepresentation{mem_rep, kNoWriteBarrier},
+ MemBuffer(capped_offset), index, val);
}
} else {
// TODO(eholk): Support unaligned stores with trap handlers.
DCHECK(!use_trap_handler());
UnalignedStoreRepresentation rep(mem_rep);
- store =
- graph()->NewNode(mcgraph()->machine()->UnalignedStore(rep),
- MemBuffer(offset), index, val, effect(), control());
+ store = gasm_->StoreUnaligned(rep, MemBuffer(capped_offset), index, val);
}
- SetEffect(store);
-
if (FLAG_trace_wasm_memory) {
- TraceMemoryOperation(true, mem_rep, index, offset, position);
+ TraceMemoryOperation(true, mem_rep, index, capped_offset, position);
}
return store;
@@ -4200,8 +4263,8 @@ Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
if (mcgraph()->machine()->Is32()) return node;
// Fold instances of ChangeUint32ToUint64(IntConstant) directly.
Uint32Matcher matcher(node);
- if (matcher.HasValue()) {
- uintptr_t value = matcher.Value();
+ if (matcher.HasResolvedValue()) {
+ uintptr_t value = matcher.ResolvedValue();
return mcgraph()->IntPtrConstant(bit_cast<intptr_t>(value));
}
return graph()->NewNode(mcgraph()->machine()->ChangeUint32ToUint64(), node);
@@ -4573,6 +4636,20 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]);
case wasm::kExprI64x2Neg:
return graph()->NewNode(mcgraph()->machine()->I64x2Neg(), inputs[0]);
+ case wasm::kExprI64x2SConvertI32x4Low:
+ return graph()->NewNode(mcgraph()->machine()->I64x2SConvertI32x4Low(),
+ inputs[0]);
+ case wasm::kExprI64x2SConvertI32x4High:
+ return graph()->NewNode(mcgraph()->machine()->I64x2SConvertI32x4High(),
+ inputs[0]);
+ case wasm::kExprI64x2UConvertI32x4Low:
+ return graph()->NewNode(mcgraph()->machine()->I64x2UConvertI32x4Low(),
+ inputs[0]);
+ case wasm::kExprI64x2UConvertI32x4High:
+ return graph()->NewNode(mcgraph()->machine()->I64x2UConvertI32x4High(),
+ inputs[0]);
+ case wasm::kExprI64x2BitMask:
+ return graph()->NewNode(mcgraph()->machine()->I64x2BitMask(), inputs[0]);
case wasm::kExprI64x2Shl:
return graph()->NewNode(mcgraph()->machine()->I64x2Shl(), inputs[0],
inputs[1]);
@@ -4588,51 +4665,27 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI64x2Mul:
return graph()->NewNode(mcgraph()->machine()->I64x2Mul(), inputs[0],
inputs[1]);
- case wasm::kExprI64x2MinS:
- return graph()->NewNode(mcgraph()->machine()->I64x2MinS(), inputs[0],
- inputs[1]);
- case wasm::kExprI64x2MaxS:
- return graph()->NewNode(mcgraph()->machine()->I64x2MaxS(), inputs[0],
- inputs[1]);
case wasm::kExprI64x2Eq:
return graph()->NewNode(mcgraph()->machine()->I64x2Eq(), inputs[0],
inputs[1]);
- case wasm::kExprI64x2Ne:
- return graph()->NewNode(mcgraph()->machine()->I64x2Ne(), inputs[0],
- inputs[1]);
- case wasm::kExprI64x2LtS:
- return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[1],
- inputs[0]);
- case wasm::kExprI64x2LeS:
- return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[1],
- inputs[0]);
- case wasm::kExprI64x2GtS:
- return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[0],
- inputs[1]);
- case wasm::kExprI64x2GeS:
- return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[0],
- inputs[1]);
case wasm::kExprI64x2ShrU:
return graph()->NewNode(mcgraph()->machine()->I64x2ShrU(), inputs[0],
inputs[1]);
- case wasm::kExprI64x2MinU:
- return graph()->NewNode(mcgraph()->machine()->I64x2MinU(), inputs[0],
- inputs[1]);
- case wasm::kExprI64x2MaxU:
- return graph()->NewNode(mcgraph()->machine()->I64x2MaxU(), inputs[0],
- inputs[1]);
- case wasm::kExprI64x2LtU:
- return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[1],
- inputs[0]);
- case wasm::kExprI64x2LeU:
- return graph()->NewNode(mcgraph()->machine()->I64x2GeU(), inputs[1],
- inputs[0]);
- case wasm::kExprI64x2GtU:
- return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[0],
- inputs[1]);
- case wasm::kExprI64x2GeU:
- return graph()->NewNode(mcgraph()->machine()->I64x2GeU(), inputs[0],
- inputs[1]);
+ case wasm::kExprI64x2ExtMulLowI32x4S:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ExtMulLowI32x4S(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI64x2ExtMulHighI32x4S:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ExtMulHighI32x4S(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI64x2ExtMulLowI32x4U:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ExtMulLowI32x4U(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI64x2ExtMulHighI32x4U:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ExtMulHighI32x4U(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI64x2SignSelect:
+ return graph()->NewNode(mcgraph()->machine()->I64x2SignSelect(),
+ inputs[0], inputs[1], inputs[2]);
case wasm::kExprI32x4Splat:
return graph()->NewNode(mcgraph()->machine()->I32x4Splat(), inputs[0]);
case wasm::kExprI32x4SConvertF32x4:
@@ -4725,6 +4778,27 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI32x4DotI16x8S:
return graph()->NewNode(mcgraph()->machine()->I32x4DotI16x8S(), inputs[0],
inputs[1]);
+ case wasm::kExprI32x4ExtMulLowI16x8S:
+ return graph()->NewNode(mcgraph()->machine()->I32x4ExtMulLowI16x8S(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI32x4ExtMulHighI16x8S:
+ return graph()->NewNode(mcgraph()->machine()->I32x4ExtMulHighI16x8S(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI32x4ExtMulLowI16x8U:
+ return graph()->NewNode(mcgraph()->machine()->I32x4ExtMulLowI16x8U(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI32x4ExtMulHighI16x8U:
+ return graph()->NewNode(mcgraph()->machine()->I32x4ExtMulHighI16x8U(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI32x4SignSelect:
+ return graph()->NewNode(mcgraph()->machine()->I32x4SignSelect(),
+ inputs[0], inputs[1], inputs[2]);
+ case wasm::kExprI32x4ExtAddPairwiseI16x8S:
+ return graph()->NewNode(mcgraph()->machine()->I32x4ExtAddPairwiseI16x8S(),
+ inputs[0]);
+ case wasm::kExprI32x4ExtAddPairwiseI16x8U:
+ return graph()->NewNode(mcgraph()->machine()->I32x4ExtAddPairwiseI16x8U(),
+ inputs[0]);
case wasm::kExprI16x8Splat:
return graph()->NewNode(mcgraph()->machine()->I16x8Splat(), inputs[0]);
case wasm::kExprI16x8SConvertI8x16Low:
@@ -4747,18 +4821,18 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI16x8Add:
return graph()->NewNode(mcgraph()->machine()->I16x8Add(), inputs[0],
inputs[1]);
- case wasm::kExprI16x8AddSaturateS:
- return graph()->NewNode(mcgraph()->machine()->I16x8AddSaturateS(),
- inputs[0], inputs[1]);
+ case wasm::kExprI16x8AddSatS:
+ return graph()->NewNode(mcgraph()->machine()->I16x8AddSatS(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8AddHoriz:
return graph()->NewNode(mcgraph()->machine()->I16x8AddHoriz(), inputs[0],
inputs[1]);
case wasm::kExprI16x8Sub:
return graph()->NewNode(mcgraph()->machine()->I16x8Sub(), inputs[0],
inputs[1]);
- case wasm::kExprI16x8SubSaturateS:
- return graph()->NewNode(mcgraph()->machine()->I16x8SubSaturateS(),
- inputs[0], inputs[1]);
+ case wasm::kExprI16x8SubSatS:
+ return graph()->NewNode(mcgraph()->machine()->I16x8SubSatS(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8Mul:
return graph()->NewNode(mcgraph()->machine()->I16x8Mul(), inputs[0],
inputs[1]);
@@ -4798,12 +4872,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI16x8ShrU:
return graph()->NewNode(mcgraph()->machine()->I16x8ShrU(), inputs[0],
inputs[1]);
- case wasm::kExprI16x8AddSaturateU:
- return graph()->NewNode(mcgraph()->machine()->I16x8AddSaturateU(),
- inputs[0], inputs[1]);
- case wasm::kExprI16x8SubSaturateU:
- return graph()->NewNode(mcgraph()->machine()->I16x8SubSaturateU(),
- inputs[0], inputs[1]);
+ case wasm::kExprI16x8AddSatU:
+ return graph()->NewNode(mcgraph()->machine()->I16x8AddSatU(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI16x8SubSatU:
+ return graph()->NewNode(mcgraph()->machine()->I16x8SubSatU(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8MinU:
return graph()->NewNode(mcgraph()->machine()->I16x8MinU(), inputs[0],
inputs[1]);
@@ -4825,10 +4899,34 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI16x8RoundingAverageU:
return graph()->NewNode(mcgraph()->machine()->I16x8RoundingAverageU(),
inputs[0], inputs[1]);
+ case wasm::kExprI16x8Q15MulRSatS:
+ return graph()->NewNode(mcgraph()->machine()->I16x8Q15MulRSatS(),
+ inputs[0], inputs[1]);
case wasm::kExprI16x8Abs:
return graph()->NewNode(mcgraph()->machine()->I16x8Abs(), inputs[0]);
case wasm::kExprI16x8BitMask:
return graph()->NewNode(mcgraph()->machine()->I16x8BitMask(), inputs[0]);
+ case wasm::kExprI16x8ExtMulLowI8x16S:
+ return graph()->NewNode(mcgraph()->machine()->I16x8ExtMulLowI8x16S(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8ExtMulHighI8x16S:
+ return graph()->NewNode(mcgraph()->machine()->I16x8ExtMulHighI8x16S(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8ExtMulLowI8x16U:
+ return graph()->NewNode(mcgraph()->machine()->I16x8ExtMulLowI8x16U(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8ExtMulHighI8x16U:
+ return graph()->NewNode(mcgraph()->machine()->I16x8ExtMulHighI8x16U(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8SignSelect:
+ return graph()->NewNode(mcgraph()->machine()->I16x8SignSelect(),
+ inputs[0], inputs[1], inputs[2]);
+ case wasm::kExprI16x8ExtAddPairwiseI8x16S:
+ return graph()->NewNode(mcgraph()->machine()->I16x8ExtAddPairwiseI8x16S(),
+ inputs[0]);
+ case wasm::kExprI16x8ExtAddPairwiseI8x16U:
+ return graph()->NewNode(mcgraph()->machine()->I16x8ExtAddPairwiseI8x16U(),
+ inputs[0]);
case wasm::kExprI8x16Splat:
return graph()->NewNode(mcgraph()->machine()->I8x16Splat(), inputs[0]);
case wasm::kExprI8x16Neg:
@@ -4845,15 +4943,15 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI8x16Add:
return graph()->NewNode(mcgraph()->machine()->I8x16Add(), inputs[0],
inputs[1]);
- case wasm::kExprI8x16AddSaturateS:
- return graph()->NewNode(mcgraph()->machine()->I8x16AddSaturateS(),
- inputs[0], inputs[1]);
+ case wasm::kExprI8x16AddSatS:
+ return graph()->NewNode(mcgraph()->machine()->I8x16AddSatS(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16Sub:
return graph()->NewNode(mcgraph()->machine()->I8x16Sub(), inputs[0],
inputs[1]);
- case wasm::kExprI8x16SubSaturateS:
- return graph()->NewNode(mcgraph()->machine()->I8x16SubSaturateS(),
- inputs[0], inputs[1]);
+ case wasm::kExprI8x16SubSatS:
+ return graph()->NewNode(mcgraph()->machine()->I8x16SubSatS(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16Mul:
return graph()->NewNode(mcgraph()->machine()->I8x16Mul(), inputs[0],
inputs[1]);
@@ -4887,12 +4985,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI8x16UConvertI16x8:
return graph()->NewNode(mcgraph()->machine()->I8x16UConvertI16x8(),
inputs[0], inputs[1]);
- case wasm::kExprI8x16AddSaturateU:
- return graph()->NewNode(mcgraph()->machine()->I8x16AddSaturateU(),
- inputs[0], inputs[1]);
- case wasm::kExprI8x16SubSaturateU:
- return graph()->NewNode(mcgraph()->machine()->I8x16SubSaturateU(),
- inputs[0], inputs[1]);
+ case wasm::kExprI8x16AddSatU:
+ return graph()->NewNode(mcgraph()->machine()->I8x16AddSatU(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI8x16SubSatU:
+ return graph()->NewNode(mcgraph()->machine()->I8x16SubSatU(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16MinU:
return graph()->NewNode(mcgraph()->machine()->I8x16MinU(), inputs[0],
inputs[1]);
@@ -4914,10 +5012,15 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI8x16RoundingAverageU:
return graph()->NewNode(mcgraph()->machine()->I8x16RoundingAverageU(),
inputs[0], inputs[1]);
+ case wasm::kExprI8x16Popcnt:
+ return graph()->NewNode(mcgraph()->machine()->I8x16Popcnt(), inputs[0]);
case wasm::kExprI8x16Abs:
return graph()->NewNode(mcgraph()->machine()->I8x16Abs(), inputs[0]);
case wasm::kExprI8x16BitMask:
return graph()->NewNode(mcgraph()->machine()->I8x16BitMask(), inputs[0]);
+ case wasm::kExprI8x16SignSelect:
+ return graph()->NewNode(mcgraph()->machine()->I8x16SignSelect(),
+ inputs[0], inputs[1], inputs[2]);
case wasm::kExprS128And:
return graph()->NewNode(mcgraph()->machine()->S128And(), inputs[0],
inputs[1]);
@@ -4935,10 +5038,6 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprS128AndNot:
return graph()->NewNode(mcgraph()->machine()->S128AndNot(), inputs[0],
inputs[1]);
- case wasm::kExprV64x2AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->V64x2AnyTrue(), inputs[0]);
- case wasm::kExprV64x2AllTrue:
- return graph()->NewNode(mcgraph()->machine()->V64x2AllTrue(), inputs[0]);
case wasm::kExprV32x4AnyTrue:
return graph()->NewNode(mcgraph()->machine()->V32x4AnyTrue(), inputs[0]);
case wasm::kExprV32x4AllTrue:
@@ -5017,158 +5116,176 @@ Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
inputs[0], inputs[1]);
}
-#define ATOMIC_BINOP_LIST(V) \
- V(I32AtomicAdd, Add, Uint32, Word32) \
- V(I64AtomicAdd, Add, Uint64, Word64) \
- V(I32AtomicAdd8U, Add, Uint8, Word32) \
- V(I32AtomicAdd16U, Add, Uint16, Word32) \
- V(I64AtomicAdd8U, Add, Uint8, Word64) \
- V(I64AtomicAdd16U, Add, Uint16, Word64) \
- V(I64AtomicAdd32U, Add, Uint32, Word64) \
- V(I32AtomicSub, Sub, Uint32, Word32) \
- V(I64AtomicSub, Sub, Uint64, Word64) \
- V(I32AtomicSub8U, Sub, Uint8, Word32) \
- V(I32AtomicSub16U, Sub, Uint16, Word32) \
- V(I64AtomicSub8U, Sub, Uint8, Word64) \
- V(I64AtomicSub16U, Sub, Uint16, Word64) \
- V(I64AtomicSub32U, Sub, Uint32, Word64) \
- V(I32AtomicAnd, And, Uint32, Word32) \
- V(I64AtomicAnd, And, Uint64, Word64) \
- V(I32AtomicAnd8U, And, Uint8, Word32) \
- V(I64AtomicAnd16U, And, Uint16, Word64) \
- V(I32AtomicAnd16U, And, Uint16, Word32) \
- V(I64AtomicAnd8U, And, Uint8, Word64) \
- V(I64AtomicAnd32U, And, Uint32, Word64) \
- V(I32AtomicOr, Or, Uint32, Word32) \
- V(I64AtomicOr, Or, Uint64, Word64) \
- V(I32AtomicOr8U, Or, Uint8, Word32) \
- V(I32AtomicOr16U, Or, Uint16, Word32) \
- V(I64AtomicOr8U, Or, Uint8, Word64) \
- V(I64AtomicOr16U, Or, Uint16, Word64) \
- V(I64AtomicOr32U, Or, Uint32, Word64) \
- V(I32AtomicXor, Xor, Uint32, Word32) \
- V(I64AtomicXor, Xor, Uint64, Word64) \
- V(I32AtomicXor8U, Xor, Uint8, Word32) \
- V(I32AtomicXor16U, Xor, Uint16, Word32) \
- V(I64AtomicXor8U, Xor, Uint8, Word64) \
- V(I64AtomicXor16U, Xor, Uint16, Word64) \
- V(I64AtomicXor32U, Xor, Uint32, Word64) \
- V(I32AtomicExchange, Exchange, Uint32, Word32) \
- V(I64AtomicExchange, Exchange, Uint64, Word64) \
- V(I32AtomicExchange8U, Exchange, Uint8, Word32) \
- V(I32AtomicExchange16U, Exchange, Uint16, Word32) \
- V(I64AtomicExchange8U, Exchange, Uint8, Word64) \
- V(I64AtomicExchange16U, Exchange, Uint16, Word64) \
- V(I64AtomicExchange32U, Exchange, Uint32, Word64)
-
-#define ATOMIC_CMP_EXCHG_LIST(V) \
- V(I32AtomicCompareExchange, Uint32, Word32) \
- V(I64AtomicCompareExchange, Uint64, Word64) \
- V(I32AtomicCompareExchange8U, Uint8, Word32) \
- V(I32AtomicCompareExchange16U, Uint16, Word32) \
- V(I64AtomicCompareExchange8U, Uint8, Word64) \
- V(I64AtomicCompareExchange16U, Uint16, Word64) \
- V(I64AtomicCompareExchange32U, Uint32, Word64)
-
-#define ATOMIC_LOAD_LIST(V) \
- V(I32AtomicLoad, Uint32, Word32) \
- V(I64AtomicLoad, Uint64, Word64) \
- V(I32AtomicLoad8U, Uint8, Word32) \
- V(I32AtomicLoad16U, Uint16, Word32) \
- V(I64AtomicLoad8U, Uint8, Word64) \
- V(I64AtomicLoad16U, Uint16, Word64) \
- V(I64AtomicLoad32U, Uint32, Word64)
-
-#define ATOMIC_STORE_LIST(V) \
- V(I32AtomicStore, Uint32, kWord32, Word32) \
- V(I64AtomicStore, Uint64, kWord64, Word64) \
- V(I32AtomicStore8U, Uint8, kWord8, Word32) \
- V(I32AtomicStore16U, Uint16, kWord16, Word32) \
- V(I64AtomicStore8U, Uint8, kWord8, Word64) \
- V(I64AtomicStore16U, Uint16, kWord16, Word64) \
- V(I64AtomicStore32U, Uint32, kWord32, Word64)
-
Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
- uint32_t alignment, uint32_t offset,
+ uint32_t alignment, uint64_t offset,
wasm::WasmCodePosition position) {
- Node* node;
+ struct AtomicOpInfo {
+ enum Type : int8_t {
+ kNoInput = 0,
+ kOneInput = 1,
+ kTwoInputs = 2,
+ kSpecial
+ };
+
+ using OperatorByType =
+ const Operator* (MachineOperatorBuilder::*)(MachineType);
+ using OperatorByRep =
+ const Operator* (MachineOperatorBuilder::*)(MachineRepresentation);
+
+ const Type type;
+ const MachineType machine_type;
+ const OperatorByType operator_by_type = nullptr;
+ const OperatorByRep operator_by_rep = nullptr;
+
+ constexpr AtomicOpInfo(Type t, MachineType m, OperatorByType o)
+ : type(t), machine_type(m), operator_by_type(o) {}
+ constexpr AtomicOpInfo(Type t, MachineType m, OperatorByRep o)
+ : type(t), machine_type(m), operator_by_rep(o) {}
+
+ // Constexpr, hence just a table lookup in most compilers.
+ static constexpr AtomicOpInfo Get(wasm::WasmOpcode opcode) {
+ switch (opcode) {
+#define CASE(Name, Type, MachType, Op) \
+ case wasm::kExpr##Name: \
+ return {Type, MachineType::MachType(), &MachineOperatorBuilder::Op};
+
+ // Binops.
+ CASE(I32AtomicAdd, kOneInput, Uint32, Word32AtomicAdd)
+ CASE(I64AtomicAdd, kOneInput, Uint64, Word64AtomicAdd)
+ CASE(I32AtomicAdd8U, kOneInput, Uint8, Word32AtomicAdd)
+ CASE(I32AtomicAdd16U, kOneInput, Uint16, Word32AtomicAdd)
+ CASE(I64AtomicAdd8U, kOneInput, Uint8, Word64AtomicAdd)
+ CASE(I64AtomicAdd16U, kOneInput, Uint16, Word64AtomicAdd)
+ CASE(I64AtomicAdd32U, kOneInput, Uint32, Word64AtomicAdd)
+ CASE(I32AtomicSub, kOneInput, Uint32, Word32AtomicSub)
+ CASE(I64AtomicSub, kOneInput, Uint64, Word64AtomicSub)
+ CASE(I32AtomicSub8U, kOneInput, Uint8, Word32AtomicSub)
+ CASE(I32AtomicSub16U, kOneInput, Uint16, Word32AtomicSub)
+ CASE(I64AtomicSub8U, kOneInput, Uint8, Word64AtomicSub)
+ CASE(I64AtomicSub16U, kOneInput, Uint16, Word64AtomicSub)
+ CASE(I64AtomicSub32U, kOneInput, Uint32, Word64AtomicSub)
+ CASE(I32AtomicAnd, kOneInput, Uint32, Word32AtomicAnd)
+ CASE(I64AtomicAnd, kOneInput, Uint64, Word64AtomicAnd)
+ CASE(I32AtomicAnd8U, kOneInput, Uint8, Word32AtomicAnd)
+ CASE(I32AtomicAnd16U, kOneInput, Uint16, Word32AtomicAnd)
+ CASE(I64AtomicAnd8U, kOneInput, Uint8, Word64AtomicAnd)
+ CASE(I64AtomicAnd16U, kOneInput, Uint16, Word64AtomicAnd)
+ CASE(I64AtomicAnd32U, kOneInput, Uint32, Word64AtomicAnd)
+ CASE(I32AtomicOr, kOneInput, Uint32, Word32AtomicOr)
+ CASE(I64AtomicOr, kOneInput, Uint64, Word64AtomicOr)
+ CASE(I32AtomicOr8U, kOneInput, Uint8, Word32AtomicOr)
+ CASE(I32AtomicOr16U, kOneInput, Uint16, Word32AtomicOr)
+ CASE(I64AtomicOr8U, kOneInput, Uint8, Word64AtomicOr)
+ CASE(I64AtomicOr16U, kOneInput, Uint16, Word64AtomicOr)
+ CASE(I64AtomicOr32U, kOneInput, Uint32, Word64AtomicOr)
+ CASE(I32AtomicXor, kOneInput, Uint32, Word32AtomicXor)
+ CASE(I64AtomicXor, kOneInput, Uint64, Word64AtomicXor)
+ CASE(I32AtomicXor8U, kOneInput, Uint8, Word32AtomicXor)
+ CASE(I32AtomicXor16U, kOneInput, Uint16, Word32AtomicXor)
+ CASE(I64AtomicXor8U, kOneInput, Uint8, Word64AtomicXor)
+ CASE(I64AtomicXor16U, kOneInput, Uint16, Word64AtomicXor)
+ CASE(I64AtomicXor32U, kOneInput, Uint32, Word64AtomicXor)
+ CASE(I32AtomicExchange, kOneInput, Uint32, Word32AtomicExchange)
+ CASE(I64AtomicExchange, kOneInput, Uint64, Word64AtomicExchange)
+ CASE(I32AtomicExchange8U, kOneInput, Uint8, Word32AtomicExchange)
+ CASE(I32AtomicExchange16U, kOneInput, Uint16, Word32AtomicExchange)
+ CASE(I64AtomicExchange8U, kOneInput, Uint8, Word64AtomicExchange)
+ CASE(I64AtomicExchange16U, kOneInput, Uint16, Word64AtomicExchange)
+ CASE(I64AtomicExchange32U, kOneInput, Uint32, Word64AtomicExchange)
+
+ // Compare-exchange.
+ CASE(I32AtomicCompareExchange, kTwoInputs, Uint32,
+ Word32AtomicCompareExchange)
+ CASE(I64AtomicCompareExchange, kTwoInputs, Uint64,
+ Word64AtomicCompareExchange)
+ CASE(I32AtomicCompareExchange8U, kTwoInputs, Uint8,
+ Word32AtomicCompareExchange)
+ CASE(I32AtomicCompareExchange16U, kTwoInputs, Uint16,
+ Word32AtomicCompareExchange)
+ CASE(I64AtomicCompareExchange8U, kTwoInputs, Uint8,
+ Word64AtomicCompareExchange)
+ CASE(I64AtomicCompareExchange16U, kTwoInputs, Uint16,
+ Word64AtomicCompareExchange)
+ CASE(I64AtomicCompareExchange32U, kTwoInputs, Uint32,
+ Word64AtomicCompareExchange)
+
+ // Load.
+ CASE(I32AtomicLoad, kNoInput, Uint32, Word32AtomicLoad)
+ CASE(I64AtomicLoad, kNoInput, Uint64, Word64AtomicLoad)
+ CASE(I32AtomicLoad8U, kNoInput, Uint8, Word32AtomicLoad)
+ CASE(I32AtomicLoad16U, kNoInput, Uint16, Word32AtomicLoad)
+ CASE(I64AtomicLoad8U, kNoInput, Uint8, Word64AtomicLoad)
+ CASE(I64AtomicLoad16U, kNoInput, Uint16, Word64AtomicLoad)
+ CASE(I64AtomicLoad32U, kNoInput, Uint32, Word64AtomicLoad)
+
+ // Store.
+ CASE(I32AtomicStore, kOneInput, Uint32, Word32AtomicStore)
+ CASE(I64AtomicStore, kOneInput, Uint64, Word64AtomicStore)
+ CASE(I32AtomicStore8U, kOneInput, Uint8, Word32AtomicStore)
+ CASE(I32AtomicStore16U, kOneInput, Uint16, Word32AtomicStore)
+ CASE(I64AtomicStore8U, kOneInput, Uint8, Word64AtomicStore)
+ CASE(I64AtomicStore16U, kOneInput, Uint16, Word64AtomicStore)
+ CASE(I64AtomicStore32U, kOneInput, Uint32, Word64AtomicStore)
+
+#undef CASE
+
+ case wasm::kExprAtomicNotify:
+ return {kSpecial, MachineType::Int32(), OperatorByType{nullptr}};
+ case wasm::kExprI32AtomicWait:
+ return {kSpecial, MachineType::Int32(), OperatorByType{nullptr}};
+ case wasm::kExprI64AtomicWait:
+ return {kSpecial, MachineType::Int64(), OperatorByType{nullptr}};
+ default:
+#if V8_HAS_CXX14_CONSTEXPR
+ UNREACHABLE();
+#else
+ // Return something for older GCC.
+ return {kSpecial, MachineType::Int64(), OperatorByType{nullptr}};
+#endif
+ }
+ }
+ };
+
+ AtomicOpInfo info = AtomicOpInfo::Get(opcode);
+
+ Node* index = CheckBoundsAndAlignment(info.machine_type.MemSize(), inputs[0],
+ offset, position);
+
+ // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
+ uintptr_t capped_offset = static_cast<uintptr_t>(offset);
+ if (info.type != AtomicOpInfo::kSpecial) {
+ const Operator* op =
+ info.operator_by_type
+ ? (mcgraph()->machine()->*info.operator_by_type)(info.machine_type)
+ : (mcgraph()->machine()->*info.operator_by_rep)(
+ info.machine_type.representation());
+
+ Node* input_nodes[6] = {MemBuffer(capped_offset), index};
+ int num_actual_inputs = info.type;
+ std::copy_n(inputs + 1, num_actual_inputs, input_nodes + 2);
+ input_nodes[num_actual_inputs + 2] = effect();
+ input_nodes[num_actual_inputs + 3] = control();
+ return gasm_->AddNode(
+ graph()->NewNode(op, num_actual_inputs + 4, input_nodes));
+ }
+
+ // After we've bounds-checked, compute the effective offset.
+ Node* effective_offset =
+ gasm_->IntAdd(gasm_->UintPtrConstant(capped_offset), index);
+
switch (opcode) {
-#define BUILD_ATOMIC_BINOP(Name, Operation, Type, Prefix) \
- case wasm::kExpr##Name: { \
- Node* index = CheckBoundsAndAlignment(MachineType::Type().MemSize(), \
- inputs[0], offset, position); \
- node = graph()->NewNode( \
- mcgraph()->machine()->Prefix##Atomic##Operation(MachineType::Type()), \
- MemBuffer(offset), index, inputs[1], effect(), control()); \
- break; \
- }
- ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
-#undef BUILD_ATOMIC_BINOP
-
-#define BUILD_ATOMIC_CMP_EXCHG(Name, Type, Prefix) \
- case wasm::kExpr##Name: { \
- Node* index = CheckBoundsAndAlignment(MachineType::Type().MemSize(), \
- inputs[0], offset, position); \
- node = graph()->NewNode( \
- mcgraph()->machine()->Prefix##AtomicCompareExchange( \
- MachineType::Type()), \
- MemBuffer(offset), index, inputs[1], inputs[2], effect(), control()); \
- break; \
- }
- ATOMIC_CMP_EXCHG_LIST(BUILD_ATOMIC_CMP_EXCHG)
-#undef BUILD_ATOMIC_CMP_EXCHG
-
-#define BUILD_ATOMIC_LOAD_OP(Name, Type, Prefix) \
- case wasm::kExpr##Name: { \
- Node* index = CheckBoundsAndAlignment(MachineType::Type().MemSize(), \
- inputs[0], offset, position); \
- node = graph()->NewNode( \
- mcgraph()->machine()->Prefix##AtomicLoad(MachineType::Type()), \
- MemBuffer(offset), index, effect(), control()); \
- break; \
- }
- ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
-#undef BUILD_ATOMIC_LOAD_OP
-
-#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep, Prefix) \
- case wasm::kExpr##Name: { \
- Node* index = CheckBoundsAndAlignment(MachineType::Type().MemSize(), \
- inputs[0], offset, position); \
- node = graph()->NewNode( \
- mcgraph()->machine()->Prefix##AtomicStore(MachineRepresentation::Rep), \
- MemBuffer(offset), index, inputs[1], effect(), control()); \
- break; \
- }
- ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
-#undef BUILD_ATOMIC_STORE_OP
case wasm::kExprAtomicNotify: {
- Node* index = CheckBoundsAndAlignment(MachineType::Uint32().MemSize(),
- inputs[0], offset, position);
- // Now that we've bounds-checked, compute the effective address.
- Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
- Uint32Constant(offset), index);
- WasmAtomicNotifyDescriptor interface_descriptor;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), interface_descriptor,
- interface_descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- StubCallMode::kCallWasmRuntimeStub);
+ auto* call_descriptor =
+ GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(
+ this, StubCallMode::kCallWasmRuntimeStub);
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmAtomicNotify, RelocInfo::WASM_STUB_CALL);
- node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- call_target, address, inputs[1], effect(),
- control());
- break;
+ return gasm_->Call(call_descriptor, call_target, effective_offset,
+ inputs[1]);
}
case wasm::kExprI32AtomicWait: {
- Node* index = CheckBoundsAndAlignment(MachineType::Uint32().MemSize(),
- inputs[0], offset, position);
- // Now that we've bounds-checked, compute the effective address.
- Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
- Uint32Constant(offset), index);
-
- auto call_descriptor = GetI32AtomicWaitCallDescriptor();
+ auto* call_descriptor = GetI32AtomicWaitCallDescriptor();
intptr_t target = mcgraph()->machine()->Is64()
? wasm::WasmCode::kWasmI32AtomicWait64
@@ -5176,20 +5293,12 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
target, RelocInfo::WASM_STUB_CALL);
- node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- call_target, address, inputs[1], inputs[2],
- effect(), control());
- break;
+ return gasm_->Call(call_descriptor, call_target, effective_offset,
+ inputs[1], inputs[2]);
}
case wasm::kExprI64AtomicWait: {
- Node* index = CheckBoundsAndAlignment(MachineType::Uint64().MemSize(),
- inputs[0], offset, position);
- // Now that we've bounds-checked, compute the effective address.
- Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
- Uint32Constant(offset), index);
-
- CallDescriptor* call_descriptor = GetI64AtomicWaitCallDescriptor();
+ auto* call_descriptor = GetI64AtomicWaitCallDescriptor();
intptr_t target = mcgraph()->machine()->Is64()
? wasm::WasmCode::kWasmI64AtomicWait64
@@ -5197,16 +5306,13 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
target, RelocInfo::WASM_STUB_CALL);
- node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- call_target, address, inputs[1], inputs[2],
- effect(), control());
- break;
+ return gasm_->Call(call_descriptor, call_target, effective_offset,
+ inputs[1], inputs[2]);
}
default:
FATAL_UNSUPPORTED_OPCODE(opcode);
}
- return SetEffect(node);
}
Node* WasmGraphBuilder::AtomicFence() {
@@ -5214,11 +5320,6 @@ Node* WasmGraphBuilder::AtomicFence() {
effect(), control()));
}
-#undef ATOMIC_BINOP_LIST
-#undef ATOMIC_CMP_EXCHG_LIST
-#undef ATOMIC_LOAD_LIST
-#undef ATOMIC_STORE_LIST
-
Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
@@ -6157,9 +6258,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
wasm::ValueType type) {
// Make sure ValueType fits in a Smi.
STATIC_ASSERT(wasm::ValueType::kLastUsedBit + 1 <= kSmiValueSize);
- Node* inputs[] = {
- instance_node_.get(), input,
- IntPtrConstant(IntToSmi(static_cast<int>(type.raw_bit_field())))};
+ Node* inputs[] = {instance_node_.get(), input,
+ mcgraph()->IntPtrConstant(
+ IntToSmi(static_cast<int>(type.raw_bit_field())))};
Node* check = BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext(
Runtime::kWasmIsValidRefValue, js_context, inputs, 3)));
@@ -6169,7 +6270,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetControl(type_check.if_false);
Node* old_effect = effect();
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowJSTypeError, js_context,
nullptr, 0);
SetEffectControl(type_check.EffectPhi(old_effect, effect()),
@@ -6241,14 +6342,38 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
BuildChangeSmiToInt32(input));
}
+ Node* HeapNumberToFloat64(Node* input) {
+ return gasm_->Load(MachineType::Float64(), input,
+ wasm::ObjectAccess::ToTagged(HeapNumber::kValueOffset));
+ }
+
Node* FromJSFast(Node* input, wasm::ValueType type) {
switch (type.kind()) {
case wasm::ValueType::kI32:
return BuildChangeSmiToInt32(input);
- case wasm::ValueType::kF32:
- return SmiToFloat32(input);
- case wasm::ValueType::kF64:
- return SmiToFloat64(input);
+ case wasm::ValueType::kF32: {
+ auto done = gasm_->MakeLabel(MachineRepresentation::kFloat32);
+ auto heap_number = gasm_->MakeLabel();
+ gasm_->GotoIfNot(IsSmi(input), &heap_number);
+ gasm_->Goto(&done, SmiToFloat32(input));
+ gasm_->Bind(&heap_number);
+ Node* value =
+ graph()->NewNode(mcgraph()->machine()->TruncateFloat64ToFloat32(),
+ HeapNumberToFloat64(input));
+ gasm_->Goto(&done, value);
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+ }
+ case wasm::ValueType::kF64: {
+ auto done = gasm_->MakeLabel(MachineRepresentation::kFloat64);
+ auto heap_number = gasm_->MakeLabel();
+ gasm_->GotoIfNot(IsSmi(input), &heap_number);
+ gasm_->Goto(&done, SmiToFloat64(input));
+ gasm_->Bind(&heap_number);
+ gasm_->Goto(&done, HeapNumberToFloat64(input));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+ }
case wasm::ValueType::kRef:
case wasm::ValueType::kOptRef:
case wasm::ValueType::kI64:
@@ -6313,7 +6438,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
Node* iterable, Node* context) {
Node* length = BuildChangeUint31ToSmi(
- Uint32Constant(static_cast<uint32_t>(sig->return_count())));
+ mcgraph()->Uint32Constant(static_cast<uint32_t>(sig->return_count())));
return CALL_BUILTIN(IterableToFixedArrayForWasm, iterable, length, context);
}
@@ -6420,12 +6545,30 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
gasm_->Int32Constant(0));
}
- Node* CanTransformFast(Node* input, wasm::ValueType type) {
+ void CanTransformFast(
+ Node* input, wasm::ValueType type,
+ v8::internal::compiler::GraphAssemblerLabel<0>* slow_path) {
switch (type.kind()) {
- case wasm::ValueType::kI32:
- case wasm::ValueType::kF64:
+ case wasm::ValueType::kI32: {
+ gasm_->GotoIfNot(IsSmi(input), slow_path);
+ return;
+ }
case wasm::ValueType::kF32:
- return IsSmi(input);
+ case wasm::ValueType::kF64: {
+ auto done = gasm_->MakeLabel();
+ gasm_->GotoIf(IsSmi(input), &done);
+ Node* map =
+ gasm_->Load(MachineType::TaggedPointer(), input,
+ wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset));
+ Node* heap_number_map = LOAD_FULL_POINTER(
+ BuildLoadIsolateRoot(),
+ IsolateData::root_slot_offset(RootIndex::kHeapNumberMap));
+ Node* is_heap_number = gasm_->WordEqual(heap_number_map, map);
+ gasm_->GotoIf(is_heap_number, &done);
+ gasm_->Goto(slow_path);
+ gasm_->Bind(&done);
+ return;
+ }
case wasm::ValueType::kRef:
case wasm::ValueType::kOptRef:
case wasm::ValueType::kI64:
@@ -6460,7 +6603,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// an actual reference to an instance or a placeholder reference,
// called {WasmExportedFunction} via the {WasmExportedFunctionData}
// structure.
- Node* function_data = BuildLoadFunctionDataFromExportedFunction(js_closure);
+ Node* function_data = BuildLoadFunctionDataFromJSFunction(js_closure);
instance_node_.set(
BuildLoadInstanceFromExportedFunctionData(function_data));
@@ -6468,7 +6611,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Throw a TypeError. Use the js_context of the calling javascript
// function (passed as a parameter), such that the generated code is
// js_context independent.
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowJSTypeError, js_context,
nullptr, 0);
TerminateThrow(effect(), control());
return;
@@ -6494,8 +6637,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// fast is encountered, skip checking the rest and fall back to the slow
// path.
for (int i = 0; i < wasm_count; ++i) {
- gasm_->GotoIfNot(CanTransformFast(params[i + 1], sig_->GetParam(i)),
- &slow_path);
+ CanTransformFast(params[i + 1], sig_->GetParam(i), &slow_path);
}
// Convert JS parameters to wasm numbers using the fast transformation
// and build the call.
@@ -6557,7 +6699,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
global_proxy);
}
- bool BuildWasmImportCallWrapper(WasmImportCallKind kind, int expected_arity) {
+ bool BuildWasmToJSWrapper(WasmImportCallKind kind, int expected_arity) {
int wasm_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
@@ -6572,7 +6714,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// =======================================================================
// === Runtime TypeError =================================================
// =======================================================================
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError,
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowJSTypeError,
native_context, nullptr, 0);
TerminateThrow(effect(), control());
return false;
@@ -6621,8 +6763,45 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args.begin());
break;
}
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
// =======================================================================
- // === JS Functions with arguments adapter ===============================
+ // === JS Functions with mismatching arity ===============================
+ // =======================================================================
+ case WasmImportCallKind::kJSFunctionArityMismatch: {
+ int pushed_count = std::max(expected_arity, wasm_count);
+ base::SmallVector<Node*, 16> args(pushed_count + 7);
+ int pos = 0;
+
+ args[pos++] = callable_node; // target callable.
+ // Determine receiver at runtime.
+ args[pos++] =
+ BuildReceiverNode(callable_node, native_context, undefined_node);
+
+ // Convert wasm numbers to JS values.
+ pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
+ for (int i = wasm_count; i < expected_arity; ++i) {
+ args[pos++] = undefined_node;
+ }
+ args[pos++] = undefined_node; // new target
+ args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
+
+ Node* function_context =
+ gasm_->Load(MachineType::TaggedPointer(), callable_node,
+ wasm::ObjectAccess::ContextOffsetInTaggedJSFunction());
+ args[pos++] = function_context;
+ args[pos++] = effect();
+ args[pos++] = control();
+ DCHECK_EQ(pos, args.size());
+
+ auto call_descriptor = Linkage::GetJSCallDescriptor(
+ graph()->zone(), false, pushed_count + 1, CallDescriptor::kNoFlags);
+ call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
+ args.begin());
+ break;
+ }
+#else
+ // =======================================================================
+ // === JS Functions with mismatching arity ===============================
// =======================================================================
case WasmImportCallKind::kJSFunctionArityMismatch: {
base::SmallVector<Node*, 16> args(wasm_count + 9);
@@ -6630,9 +6809,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* function_context =
gasm_->Load(MachineType::TaggedPointer(), callable_node,
wasm::ObjectAccess::ContextOffsetInTaggedJSFunction());
- args[pos++] = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kArgumentsAdaptorTrampoline,
- RelocInfo::WASM_STUB_CALL);
+ args[pos++] =
+ GetBuiltinPointerTarget(Builtins::kArgumentsAdaptorTrampoline);
args[pos++] = callable_node; // target callable
args[pos++] = undefined_node; // new target
args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
@@ -6657,7 +6835,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), ArgumentsAdaptorDescriptor{}, 1 + wasm_count,
CallDescriptor::kNoFlags, Operator::kNoProperties,
- StubCallMode::kCallWasmRuntimeStub);
+ StubCallMode::kCallBuiltinPointer);
// Convert wasm numbers to JS values.
pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
@@ -6670,47 +6848,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args.begin());
break;
}
- // =======================================================================
- // === JS Functions without arguments adapter ============================
- // =======================================================================
- case WasmImportCallKind::kJSFunctionArityMismatchSkipAdaptor: {
- base::SmallVector<Node*, 16> args(expected_arity + 7);
- int pos = 0;
- Node* function_context =
- gasm_->Load(MachineType::TaggedPointer(), callable_node,
- wasm::ObjectAccess::ContextOffsetInTaggedJSFunction());
- args[pos++] = callable_node; // target callable.
-
- // Determine receiver at runtime.
- args[pos++] =
- BuildReceiverNode(callable_node, native_context, undefined_node);
-
- auto call_descriptor = Linkage::GetJSCallDescriptor(
- graph()->zone(), false, expected_arity + 1,
- CallDescriptor::kNoFlags);
-
- // Convert wasm numbers to JS values.
- if (expected_arity <= wasm_count) {
- pos = AddArgumentNodes(VectorOf(args), pos, expected_arity, sig_);
- } else {
- pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
- for (int i = wasm_count; i < expected_arity; ++i) {
- args[pos++] = undefined_node;
- }
- }
-
- args[pos++] = undefined_node; // new target
- args[pos++] =
- mcgraph()->Int32Constant(expected_arity); // argument count
- args[pos++] = function_context;
- args[pos++] = effect();
- args[pos++] = control();
-
- DCHECK_EQ(pos, args.size());
- call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
- args.begin());
- break;
- }
+#endif
// =======================================================================
// === General case of unknown callable ==================================
// =======================================================================
@@ -6837,11 +6975,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
BuildModifyThreadInWasmFlag(true);
- Node* exception_branch =
- graph()->NewNode(mcgraph()->common()->Branch(BranchHint::kTrue),
- graph()->NewNode(mcgraph()->machine()->WordEqual(),
- return_value, IntPtrConstant(0)),
- control());
+ Node* exception_branch = graph()->NewNode(
+ mcgraph()->common()->Branch(BranchHint::kTrue),
+ graph()->NewNode(mcgraph()->machine()->WordEqual(), return_value,
+ mcgraph()->IntPtrConstant(0)),
+ control());
SetControl(
graph()->NewNode(mcgraph()->common()->IfFalse(), exception_branch));
WasmThrowDescriptor interface_descriptor;
@@ -6897,7 +7035,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Throw a TypeError if the signature is incompatible with JavaScript.
if (!wasm::IsJSCompatibleSignature(sig_, module_, enabled_features_)) {
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, context,
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowJSTypeError, context,
nullptr, 0);
TerminateThrow(effect(), control());
return;
@@ -7114,17 +7252,17 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
const wasm::WasmFeatures& enabled_features) {
if (WasmExportedFunction::IsWasmExportedFunction(*callable)) {
auto imported_function = Handle<WasmExportedFunction>::cast(callable);
- auto func_index = imported_function->function_index();
- auto module = imported_function->instance().module();
- const wasm::FunctionSig* imported_sig = module->functions[func_index].sig;
- if (*imported_sig != *expected_sig) {
+ if (!imported_function->MatchesSignature(module, expected_sig)) {
return std::make_pair(WasmImportCallKind::kLinkError, callable);
}
- if (static_cast<uint32_t>(func_index) >= module->num_imported_functions) {
+ uint32_t func_index =
+ static_cast<uint32_t>(imported_function->function_index());
+ if (func_index >=
+ imported_function->instance().module()->num_imported_functions) {
return std::make_pair(WasmImportCallKind::kWasmToWasm, callable);
}
Isolate* isolate = callable->GetIsolate();
- // Resolve the short-cut to the underlying callable and continue.
+ // Resolve the shortcut to the underlying callable and continue.
Handle<WasmInstanceObject> instance(imported_function->instance(), isolate);
ImportedFunctionEntry entry(instance, func_index);
callable = handle(entry.callable(), isolate);
@@ -7224,14 +7362,6 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope);
}
-#ifndef V8_REVERSE_JSARGS
- // This optimization is disabled when the arguments are reversed. It will be
- // subsumed when the argumens adaptor frame is removed.
- if (shared->is_safe_to_skip_arguments_adaptor()) {
- return std::make_pair(
- WasmImportCallKind::kJSFunctionArityMismatchSkipAdaptor, callable);
- }
-#endif
return std::make_pair(WasmImportCallKind::kJSFunctionArityMismatch,
callable);
@@ -7378,7 +7508,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
WasmWrapperGraphBuilder builder(
&zone, mcgraph, sig, env->module, source_position_table,
StubCallMode::kCallWasmRuntimeStub, env->enabled_features);
- builder.BuildWasmImportCallWrapper(kind, expected_arity);
+ builder.BuildWasmToJSWrapper(kind, expected_arity);
// Build a name in the form "wasm-to-js-<kind>-<signature>".
constexpr size_t kMaxNameLen = 128;
@@ -7455,6 +7585,57 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
return native_module->PublishCode(std::move(wasm_code));
}
+MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
+ const wasm::FunctionSig* sig,
+ WasmImportCallKind kind,
+ int expected_arity) {
+ std::unique_ptr<Zone> zone = std::make_unique<Zone>(
+ isolate->allocator(), ZONE_NAME, kCompressGraphZone);
+
+ // Create the Graph
+ Graph* graph = zone->New<Graph>(zone.get());
+ CommonOperatorBuilder* common = zone->New<CommonOperatorBuilder>(zone.get());
+ MachineOperatorBuilder* machine = zone->New<MachineOperatorBuilder>(
+ zone.get(), MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements());
+ MachineGraph* mcgraph = zone->New<MachineGraph>(graph, common, machine);
+
+ WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, nullptr, nullptr,
+ StubCallMode::kCallWasmRuntimeStub,
+ wasm::WasmFeatures::FromIsolate(isolate));
+ builder.BuildWasmToJSWrapper(kind, expected_arity);
+
+ // Build a name in the form "wasm-to-js-<kind>-<signature>".
+ constexpr size_t kMaxNameLen = 128;
+ constexpr size_t kNamePrefixLen = 11;
+ auto name_buffer = std::unique_ptr<char[]>(new char[kMaxNameLen]);
+ memcpy(name_buffer.get(), "wasm-to-js:", kNamePrefixLen);
+ PrintSignature(VectorOf(name_buffer.get(), kMaxNameLen) + kNamePrefixLen,
+ sig);
+
+ // Generate the call descriptor.
+ CallDescriptor* incoming =
+ GetWasmCallDescriptor(zone.get(), sig, WasmGraphBuilder::kNoRetpoline,
+ WasmCallKind::kWasmImportWrapper);
+
+ // Run the compilation job synchronously.
+ std::unique_ptr<OptimizedCompilationJob> job(
+ Pipeline::NewWasmHeapStubCompilationJob(
+ isolate, isolate->wasm_engine(), incoming, std::move(zone), graph,
+ CodeKind::WASM_TO_JS_FUNCTION, std::move(name_buffer),
+ AssemblerOptions::Default(isolate)));
+
+ // Compile the wrapper
+ if (job->ExecuteJob(isolate->counters()->runtime_call_stats()) ==
+ CompilationJob::FAILED ||
+ job->FinalizeJob(isolate) == CompilationJob::FAILED) {
+ return Handle<Code>();
+ }
+ Handle<Code> code = job->compilation_info()->code();
+ return code;
+}
+
MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
const wasm::FunctionSig* sig,
const wasm::WasmModule* module) {
@@ -7547,7 +7728,7 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig,
CodeKind::C_WASM_ENTRY, std::move(name_buffer),
AssemblerOptions::Default(isolate)));
- CHECK_NE(job->ExecuteJob(isolate->counters()->runtime_call_stats()),
+ CHECK_NE(job->ExecuteJob(isolate->counters()->runtime_call_stats(), nullptr),
CompilationJob::FAILED);
CHECK_NE(job->FinalizeJob(isolate), CompilationJob::FAILED);
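
The new CompileWasmToJSWrapper above follows the usual two-phase job flow: ExecuteJob does the compilation work and FinalizeJob materializes the Code object, with a failure in either phase reported as an empty MaybeHandle. A rough standalone sketch of that control flow, using hypothetical stand-in types rather than the real OptimizedCompilationJob and Pipeline classes:

// Hypothetical sketch of the execute-then-finalize compilation-job pattern;
// the types below are stand-ins, not V8 APIs.
#include <memory>
#include <optional>
#include <string>

enum class Status { kSucceeded, kFailed };

struct WrapperJob {
  Status ExecuteJob() { return Status::kSucceeded; }   // main compilation work
  Status FinalizeJob() { return Status::kSucceeded; }  // produces the final code object
  std::string code = "wasm-to-js:<kind>-<signature>";
};

// Plays the role of MaybeHandle<Code>: an empty value means "compilation failed".
std::optional<std::string> CompileWrapperSketch() {
  auto job = std::make_unique<WrapperJob>();
  if (job->ExecuteJob() == Status::kFailed ||
      job->FinalizeJob() == Status::kFailed) {
    return std::nullopt;
  }
  return job->code;
}

int main() { return CompileWrapperSketch().has_value() ? 0 : 1; }
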
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index ab42610239..c431f53efe 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -58,15 +58,12 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
// type of the target function/callable and whether the signature matches the
// argument arity.
enum class WasmImportCallKind : uint8_t {
- kLinkError, // static Wasm->Wasm type error
- kRuntimeTypeError, // runtime Wasm->JS type error
- kWasmToCapi, // fast Wasm->C-API call
- kWasmToWasm, // fast Wasm->Wasm call
- kJSFunctionArityMatch, // fast Wasm->JS call
- kJSFunctionArityMismatch, // Wasm->JS, needs adapter frame
- kJSFunctionArityMismatchSkipAdaptor, // Wasm->JS, arity mismatch calling
- // strict mode function where we don't
- // need the ArgumentsAdaptorTrampoline.
+ kLinkError, // static Wasm->Wasm type error
+ kRuntimeTypeError, // runtime Wasm->JS type error
+ kWasmToCapi, // fast Wasm->C-API call
+ kWasmToWasm, // fast Wasm->Wasm call
+ kJSFunctionArityMatch, // fast Wasm->JS call
+ kJSFunctionArityMismatch, // Wasm->JS, needs adapter frame
// Math functions imported from JavaScript that are intrinsified
kFirstMathIntrinsic,
kF64Acos = kFirstMathIntrinsic,
@@ -126,6 +123,11 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
const wasm::FunctionSig* sig, const wasm::WasmModule* module,
bool is_import, const wasm::WasmFeatures& enabled_features);
+MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
+ const wasm::FunctionSig* sig,
+ WasmImportCallKind kind,
+ int expected_arity);
+
// Compiles a stub with JS linkage that serves as an adapter for function
// objects constructed via {WebAssembly.Function}. It performs a round-trip
// simulating a JS-to-Wasm-to-JS coercion of parameter and return values.
@@ -206,10 +208,8 @@ class WasmGraphBuilder {
Node* RefNull();
Node* RefFunc(uint32_t function_index);
Node* RefAsNonNull(Node* arg, wasm::WasmCodePosition position);
- Node* Uint32Constant(uint32_t value);
Node* Int32Constant(int32_t value);
Node* Int64Constant(int64_t value);
- Node* IntPtrConstant(intptr_t value);
Node* Float32Constant(float value);
Node* Float64Constant(double value);
Node* Simd128Constant(const uint8_t value[16]);
@@ -303,23 +303,28 @@ class WasmGraphBuilder {
//-----------------------------------------------------------------------
Node* CurrentMemoryPages();
Node* TraceMemoryOperation(bool is_store, MachineRepresentation, Node* index,
- uint32_t offset, wasm::WasmCodePosition);
+ uintptr_t offset, wasm::WasmCodePosition);
Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
- uint32_t offset, uint32_t alignment,
+ uint64_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
Node* LoadTransformBigEndian(wasm::ValueType type, MachineType memtype,
wasm::LoadTransformationKind transform,
- Node* index, uint32_t offset, uint32_t alignment,
+ Node* index, uint64_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
#endif
Node* LoadTransform(wasm::ValueType type, MachineType memtype,
wasm::LoadTransformationKind transform, Node* index,
- uint32_t offset, uint32_t alignment,
+ uint64_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
- Node* StoreMem(MachineRepresentation mem_rep, Node* index, uint32_t offset,
+ Node* LoadLane(MachineType memtype, Node* value, Node* index, uint32_t offset,
+ uint8_t laneidx, wasm::WasmCodePosition position);
+ Node* StoreMem(MachineRepresentation mem_rep, Node* index, uint64_t offset,
uint32_t alignment, Node* val, wasm::WasmCodePosition position,
wasm::ValueType type);
+ Node* StoreLane(MachineRepresentation mem_rep, Node* index, uint32_t offset,
+ uint32_t alignment, Node* val, uint8_t laneidx,
+ wasm::WasmCodePosition position, wasm::ValueType type);
static void PrintDebugName(Node* node);
void set_instance_node(Node* instance_node) {
@@ -382,7 +387,7 @@ class WasmGraphBuilder {
Node* Simd8x16ShuffleOp(const uint8_t shuffle[16], Node* const* inputs);
Node* AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
- uint32_t alignment, uint32_t offset,
+ uint32_t alignment, uint64_t offset,
wasm::WasmCodePosition position);
Node* AtomicFence();
@@ -455,7 +460,10 @@ class WasmGraphBuilder {
Node* BuildLoadIsolateRoot();
- Node* MemBuffer(uint32_t offset);
+ // MemBuffer is only called with valid offsets (after bounds checking), so the
+ // offset fits in a platform-dependent uintptr_t.
+ Node* MemBuffer(uintptr_t offset);
+
// BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
Node* BoundsCheckMem(uint8_t access_size, Node* index, uint64_t offset,
wasm::WasmCodePosition, EnforceBoundsCheck);
@@ -470,8 +478,8 @@ class WasmGraphBuilder {
// partially out-of-bounds, traps if it is completely out-of-bounds.
Node* BoundsCheckMemRange(Node** start, Node** size, wasm::WasmCodePosition);
- Node* CheckBoundsAndAlignment(uint8_t access_size, Node* index,
- uint32_t offset, wasm::WasmCodePosition);
+ Node* CheckBoundsAndAlignment(int8_t access_size, Node* index,
+ uint64_t offset, wasm::WasmCodePosition);
Node* Uint32ToUintptr(Node*);
const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
@@ -612,7 +620,7 @@ class WasmGraphBuilder {
Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
Node* iterable, Node* context);
- Node* BuildLoadFunctionDataFromExportedFunction(Node* closure);
+ Node* BuildLoadFunctionDataFromJSFunction(Node* closure);
Node* BuildLoadJumpTableOffsetFromExportedFunctionData(Node* function_data);
Node* BuildLoadFunctionIndexFromExportedFunctionData(Node* function_data);
diff --git a/deps/v8/src/compiler/zone-stats.h b/deps/v8/src/compiler/zone-stats.h
index a272a674d4..8e0f4fa72a 100644
--- a/deps/v8/src/compiler/zone-stats.h
+++ b/deps/v8/src/compiler/zone-stats.h
@@ -28,6 +28,9 @@ class V8_EXPORT_PRIVATE ZoneStats final {
support_zone_compression_(support_zone_compression) {}
~Scope() { Destroy(); }
+ Scope(const Scope&) = delete;
+ Scope& operator=(const Scope&) = delete;
+
Zone* zone() {
if (zone_ == nullptr)
zone_ =
@@ -46,13 +49,14 @@ class V8_EXPORT_PRIVATE ZoneStats final {
ZoneStats* const zone_stats_;
Zone* zone_;
const bool support_zone_compression_;
- DISALLOW_COPY_AND_ASSIGN(Scope);
};
class V8_EXPORT_PRIVATE StatsScope final {
public:
explicit StatsScope(ZoneStats* zone_stats);
~StatsScope();
+ StatsScope(const StatsScope&) = delete;
+ StatsScope& operator=(const StatsScope&) = delete;
size_t GetMaxAllocatedBytes();
size_t GetCurrentAllocatedBytes();
@@ -68,12 +72,12 @@ class V8_EXPORT_PRIVATE ZoneStats final {
InitialValues initial_values_;
size_t total_allocated_bytes_at_start_;
size_t max_allocated_bytes_;
-
- DISALLOW_COPY_AND_ASSIGN(StatsScope);
};
explicit ZoneStats(AccountingAllocator* allocator);
~ZoneStats();
+ ZoneStats(const ZoneStats&) = delete;
+ ZoneStats& operator=(const ZoneStats&) = delete;
size_t GetMaxAllocatedBytes() const;
size_t GetTotalAllocatedBytes() const;
@@ -92,8 +96,6 @@ class V8_EXPORT_PRIVATE ZoneStats final {
size_t max_allocated_bytes_;
size_t total_deleted_bytes_;
AccountingAllocator* allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(ZoneStats);
};
} // namespace compiler
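
The zone-stats.h hunks above are part of a wider cleanup that swaps the DISALLOW_COPY_AND_ASSIGN macro for explicitly deleted copy operations. A minimal standalone sketch of the idiom, using a made-up class rather than V8's ZoneStats:

// Standalone illustration of the deleted-copy idiom (hypothetical class, not V8 code).
#include <cstddef>

class StatsScopeLike {
 public:
  StatsScopeLike() = default;
  ~StatsScopeLike() = default;

  // Spelling out the deleted copy operations replaces the old
  // DISALLOW_COPY_AND_ASSIGN(StatsScopeLike) macro and keeps the intent
  // visible in the public interface instead of hiding it behind a macro.
  StatsScopeLike(const StatsScopeLike&) = delete;
  StatsScopeLike& operator=(const StatsScopeLike&) = delete;

 private:
  size_t max_allocated_bytes_ = 0;
};

int main() {
  StatsScopeLike scope;
  // StatsScopeLike copy = scope;  // would not compile: copy constructor is deleted
  return 0;
}
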
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index dc17a69697..da7f0e4dd1 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -540,8 +540,8 @@ class StreamingCompileTask final : public v8::Task {
StreamingCompileTask(Isolate* isolate,
v8::ScriptCompiler::StreamedSource* streamed_source)
: isolate_(isolate),
- script_streaming_task_(v8::ScriptCompiler::StartStreamingScript(
- isolate, streamed_source)) {
+ script_streaming_task_(
+ v8::ScriptCompiler::StartStreaming(isolate, streamed_source)) {
Shell::NotifyStartStreamingTask(isolate_);
}
@@ -835,7 +835,8 @@ MaybeLocal<Module> ResolveModuleCallback(Local<Context> context,
} // anonymous namespace
-MaybeLocal<Module> Shell::FetchModuleTree(Local<Context> context,
+MaybeLocal<Module> Shell::FetchModuleTree(Local<Module> referrer,
+ Local<Context> context,
const std::string& file_name) {
DCHECK(IsAbsolutePath(file_name));
Isolate* isolate = context->GetIsolate();
@@ -848,8 +849,16 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Context> context,
source_text = ReadFile(isolate, fallback_file_name.c_str());
}
}
+
+ ModuleEmbedderData* d = GetModuleDataFromContext(context);
if (source_text.IsEmpty()) {
- std::string msg = "d8: Error reading module from " + file_name;
+ std::string msg = "d8: Error reading module from " + file_name;
+ if (!referrer.IsEmpty()) {
+ auto specifier_it =
+ d->module_to_specifier_map.find(Global<Module>(isolate, referrer));
+ CHECK(specifier_it != d->module_to_specifier_map.end());
+ msg += "\n imported by " + specifier_it->second;
+ }
Throw(isolate, msg.c_str());
return MaybeLocal<Module>();
}
@@ -863,7 +872,6 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Context> context,
return MaybeLocal<Module>();
}
- ModuleEmbedderData* d = GetModuleDataFromContext(context);
CHECK(d->specifier_to_module_map
.insert(std::make_pair(file_name, Global<Module>(isolate, module)))
.second);
@@ -878,7 +886,7 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Context> context,
std::string absolute_path =
NormalizePath(ToSTLString(isolate, name), dir_name);
if (d->specifier_to_module_map.count(absolute_path)) continue;
- if (FetchModuleTree(context, absolute_path).IsEmpty()) {
+ if (FetchModuleTree(module, context, absolute_path).IsEmpty()) {
return MaybeLocal<Module>();
}
}
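
The referrer parameter threaded through FetchModuleTree above exists only to improve the error message when a nested import cannot be read ("imported by <specifier>"). A toy standalone version of the same pattern, using hypothetical file names and no d8 types:

// Hypothetical sketch: recursive module loading that carries the referring
// module so a read failure can report which module imported the missing file.
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <vector>

std::map<std::string, std::vector<std::string>> g_imports = {
    {"main.mjs", {"helper.mjs"}},
    {"helper.mjs", {"missing.mjs"}},
};

std::optional<std::string> ReadSource(const std::string& path) {
  if (path == "missing.mjs") return std::nullopt;  // simulate a failed read
  return "// source of " + path;
}

bool FetchModuleTree(const std::string* referrer, const std::string& path) {
  std::optional<std::string> source = ReadSource(path);
  if (!source) {
    std::string msg = "Error reading module from " + path;
    if (referrer != nullptr) msg += "\n    imported by " + *referrer;
    std::cerr << msg << "\n";
    return false;
  }
  for (const std::string& import : g_imports[path]) {
    // Pass the current module as the referrer for its own imports.
    if (!FetchModuleTree(&path, import)) return false;
  }
  return true;
}

int main() { return FetchModuleTree(nullptr, "main.mjs") ? 0 : 1; }
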
@@ -1023,7 +1031,8 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
auto module_it = d->specifier_to_module_map.find(absolute_path);
if (module_it != d->specifier_to_module_map.end()) {
root_module = module_it->second.Get(isolate);
- } else if (!FetchModuleTree(realm, absolute_path).ToLocal(&root_module)) {
+ } else if (!FetchModuleTree(Local<Module>(), realm, absolute_path)
+ .ToLocal(&root_module)) {
CHECK(try_catch.HasCaught());
resolver->Reject(realm, try_catch.Exception()).ToChecked();
return;
@@ -1090,7 +1099,8 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
Local<Module> root_module;
- if (!FetchModuleTree(realm, absolute_path).ToLocal(&root_module)) {
+ if (!FetchModuleTree(Local<Module>(), realm, absolute_path)
+ .ToLocal(&root_module)) {
CHECK(try_catch.HasCaught());
ReportException(isolate, &try_catch);
return false;
@@ -1210,6 +1220,7 @@ int PerIsolateData::HandleUnhandledPromiseRejections() {
Shell::ReportException(isolate_, message, value);
}
unhandled_promises_.clear();
+ ignore_unhandled_promises_ = false;
return static_cast<int>(i);
}
@@ -1313,10 +1324,14 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw(args.GetIsolate(), "Invalid argument");
return;
}
- int index = data->RealmFind(args[0]
- ->ToObject(isolate->GetCurrentContext())
- .ToLocalChecked()
- ->CreationContext());
+ Local<Object> object =
+ args[0]->ToObject(isolate->GetCurrentContext()).ToLocalChecked();
+ i::Handle<i::JSReceiver> i_object = Utils::OpenHandle(*object);
+ if (i_object->IsJSGlobalProxy() &&
+ i::Handle<i::JSGlobalProxy>::cast(i_object)->IsDetached()) {
+ return;
+ }
+ int index = data->RealmFind(object->CreationContext());
if (index == -1) return;
args.GetReturnValue().Set(index);
}
@@ -2140,7 +2155,10 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
global_template->Set(isolate, "performance",
Shell::CreatePerformanceTemplate(isolate));
global_template->Set(isolate, "Worker", Shell::CreateWorkerTemplate(isolate));
- global_template->Set(isolate, "os", Shell::CreateOSTemplate(isolate));
+ // Prevent fuzzers from creating side effects.
+ if (!i::FLAG_fuzzing) {
+ global_template->Set(isolate, "os", Shell::CreateOSTemplate(isolate));
+ }
global_template->Set(isolate, "d8", Shell::CreateD8Template(isolate));
#ifdef V8_FUZZILLI
@@ -3571,7 +3589,6 @@ int Shell::RunMain(Isolate* isolate, bool last_run) {
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
if (!options.isolate_sources[0].Execute(isolate)) success = false;
if (!CompleteMessageLoop(isolate)) success = false;
- if (!HandleUnhandledPromiseRejections(isolate)) success = false;
}
if (!use_existing_context) {
DisposeModuleEmbedderData(context);
@@ -3603,6 +3620,9 @@ int Shell::RunMain(Isolate* isolate, bool last_run) {
printf("%i pending unhandled Promise rejection(s) detected.\n",
Shell::unhandled_promise_rejections_.load());
success = false;
+ // RunMain may be executed multiple times, e.g. in REPRL mode, so we have to
+ // reset this counter.
+ Shell::unhandled_promise_rejections_.store(0);
}
// In order to finish successfully, success must be != expected_to_throw.
return success == Shell::options.expected_to_throw ? 1 : 0;
diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h
index 11ec47d815..e0fb8d5892 100644
--- a/deps/v8/src/d8/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -608,7 +608,8 @@ class Shell : public i::AllStatic {
v8::MaybeLocal<Value> global_object);
static void DisposeRealm(const v8::FunctionCallbackInfo<v8::Value>& args,
int index);
- static MaybeLocal<Module> FetchModuleTree(v8::Local<v8::Context> context,
+ static MaybeLocal<Module> FetchModuleTree(v8::Local<v8::Module> origin_module,
+ v8::Local<v8::Context> context,
const std::string& file_name);
static ScriptCompiler::CachedData* LookupCodeCache(Isolate* isolate,
Local<Value> name);
diff --git a/deps/v8/src/date/DIR_METADATA b/deps/v8/src/date/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/date/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/date/OWNERS b/deps/v8/src/date/OWNERS
index 6edeeae0ea..3e6f2b948d 100644
--- a/deps/v8/src/date/OWNERS
+++ b/deps/v8/src/date/OWNERS
@@ -2,5 +2,3 @@ ishell@chromium.org
jshin@chromium.org
ulan@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/debug/DIR_METADATA b/deps/v8/src/debug/DIR_METADATA
new file mode 100644
index 0000000000..3ba1106a5f
--- /dev/null
+++ b/deps/v8/src/debug/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Platform>DevTools>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/debug/OWNERS b/deps/v8/src/debug/OWNERS
index 6c13c38e48..5b93352039 100644
--- a/deps/v8/src/debug/OWNERS
+++ b/deps/v8/src/debug/OWNERS
@@ -4,5 +4,3 @@ mvstanton@chromium.org
szuend@chromium.org
verwaest@chromium.org
yangguo@chromium.org
-
-# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index c7d0a890c4..7fb0b3723f 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -17,10 +17,26 @@
#include "src/interpreter/bytecodes.h"
#include "src/objects/contexts.h"
#include "src/snapshot/snapshot.h"
+#include "src/wasm/wasm-debug.h"
+#include "src/wasm/wasm-js.h"
namespace v8 {
namespace internal {
+namespace {
+static MaybeHandle<SharedFunctionInfo> GetFunctionInfo(Isolate* isolate,
+ Handle<String> source,
+ REPLMode repl_mode) {
+ Compiler::ScriptDetails script_details(isolate->factory()->empty_string());
+ script_details.repl_mode = repl_mode;
+ ScriptOriginOptions origin_options(false, true);
+ return Compiler::GetSharedFunctionInfoForScript(
+ isolate, source, script_details, origin_options, nullptr, nullptr,
+ ScriptCompiler::kNoCompileOptions, ScriptCompiler::kNoCacheNoReason,
+ NOT_NATIVES_CODE);
+}
+} // namespace
+
MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
Handle<String> source,
debug::EvaluateGlobalMode mode,
@@ -32,19 +48,12 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
mode ==
debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect);
- Handle<Context> context = isolate->native_context();
- Compiler::ScriptDetails script_details(isolate->factory()->empty_string());
- script_details.repl_mode = repl_mode;
- ScriptOriginOptions origin_options(false, true);
- MaybeHandle<SharedFunctionInfo> maybe_function_info =
- Compiler::GetSharedFunctionInfoForScript(
- isolate, source, script_details, origin_options, nullptr, nullptr,
- ScriptCompiler::kNoCompileOptions, ScriptCompiler::kNoCacheNoReason,
- NOT_NATIVES_CODE);
-
Handle<SharedFunctionInfo> shared_info;
- if (!maybe_function_info.ToHandle(&shared_info)) return MaybeHandle<Object>();
+ if (!GetFunctionInfo(isolate, source, repl_mode).ToHandle(&shared_info)) {
+ return MaybeHandle<Object>();
+ }
+ Handle<Context> context = isolate->native_context();
Handle<JSFunction> fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(shared_info,
context);
@@ -91,6 +100,39 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
return maybe_result;
}
+V8_EXPORT MaybeHandle<Object> DebugEvaluate::WebAssembly(
+ Handle<WasmInstanceObject> instance, StackFrameId frame_id,
+ Handle<String> source, bool throw_on_side_effect) {
+ Isolate* isolate = instance->GetIsolate();
+
+ StackTraceFrameIterator it(isolate, frame_id);
+ if (!it.is_wasm()) return isolate->factory()->undefined_value();
+ WasmFrame* frame = WasmFrame::cast(it.frame());
+
+ Handle<JSProxy> context_extension = WasmJs::GetJSDebugProxy(frame);
+
+ DisableBreak disable_break_scope(isolate->debug(), /*disable=*/true);
+
+ Handle<SharedFunctionInfo> shared_info;
+ if (!GetFunctionInfo(isolate, source, REPLMode::kNo).ToHandle(&shared_info)) {
+ return {};
+ }
+
+ Handle<ScopeInfo> scope_info =
+ ScopeInfo::CreateForWithScope(isolate, Handle<ScopeInfo>::null());
+ Handle<Context> context = isolate->factory()->NewWithContext(
+ isolate->native_context(), scope_info, context_extension);
+
+ Handle<Object> result;
+ if (!DebugEvaluate::Evaluate(isolate, shared_info, context, context_extension,
+ source, throw_on_side_effect)
+ .ToHandle(&result)) {
+ return {};
+ }
+
+ return result;
+}
+
MaybeHandle<Object> DebugEvaluate::WithTopmostArguments(Isolate* isolate,
Handle<String> source) {
// Handle the processing of break.
@@ -321,6 +363,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ObjectEntries) \
V(ObjectEntriesSkipFastPath) \
V(ObjectHasOwnProperty) \
+ V(ObjectKeys) \
V(ObjectValues) \
V(ObjectValuesSkipFastPath) \
V(ObjectGetOwnPropertyNames) \
@@ -511,6 +554,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kObjectIsExtensible:
case Builtins::kObjectIsFrozen:
case Builtins::kObjectIsSealed:
+ case Builtins::kObjectKeys:
case Builtins::kObjectPrototypeValueOf:
case Builtins::kObjectValues:
case Builtins::kObjectPrototypeHasOwnProperty:
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index aa1abba115..2f4cc2da4e 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -37,6 +37,10 @@ class DebugEvaluate : public AllStatic {
Handle<String> source,
bool throw_on_side_effect);
+ static V8_EXPORT MaybeHandle<Object> WebAssembly(
+ Handle<WasmInstanceObject> instance, StackFrameId frame_id,
+ Handle<String> source, bool throw_on_side_effect);
+
// This is used for break-at-entry for builtins and API functions.
// Evaluate a piece of JavaScript in the native context, but with the
// materialized arguments object and receiver of the current call.
@@ -102,7 +106,6 @@ class DebugEvaluate : public AllStatic {
bool throw_on_side_effect);
};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index d0ee6bda42..4c8da809bb 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -11,7 +11,7 @@
namespace v8 {
namespace internal {
-FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
+FrameInspector::FrameInspector(CommonFrame* frame, int inlined_frame_index,
Isolate* isolate)
: frame_(frame),
inlined_frame_index_(inlined_frame_index),
@@ -54,14 +54,10 @@ JavaScriptFrame* FrameInspector::javascript_frame() {
: JavaScriptFrame::cast(frame_);
}
-int FrameInspector::GetParametersCount() {
- if (is_optimized_) return deoptimized_frame_->parameters_count();
- return frame_->ComputeParametersCount();
-}
-
Handle<Object> FrameInspector::GetParameter(int index) {
if (is_optimized_) return deoptimized_frame_->GetParameter(index);
- return handle(frame_->GetParameter(index), isolate_);
+ DCHECK(IsJavaScript());
+ return handle(javascript_frame()->GetParameter(index), isolate_);
}
Handle<Object> FrameInspector::GetExpression(int index) {
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 541ee1dc1d..c554ca17b5 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -16,17 +16,15 @@ namespace v8 {
namespace internal {
class JavaScriptFrame;
-class StandardFrame;
+class CommonFrame;
class WasmFrame;
class FrameInspector {
public:
- FrameInspector(StandardFrame* frame, int inlined_frame_index,
- Isolate* isolate);
+ FrameInspector(CommonFrame* frame, int inlined_frame_index, Isolate* isolate);
~FrameInspector();
- int GetParametersCount();
Handle<JSFunction> GetFunction() const { return function_; }
Handle<Script> GetScript() { return script_; }
Handle<Object> GetParameter(int index);
@@ -49,7 +47,7 @@ class FrameInspector {
bool ParameterIsShadowedByContextLocal(Handle<ScopeInfo> info,
Handle<String> parameter_name);
- StandardFrame* frame_;
+ CommonFrame* frame_;
int inlined_frame_index_;
std::unique_ptr<DeoptimizedFrameInfo> deoptimized_frame_;
Isolate* isolate_;
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 9234fe35ac..ded8a31639 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -24,6 +24,7 @@ struct CoverageScript;
struct TypeProfileEntry;
struct TypeProfileScript;
class Coverage;
+class DisableBreak;
class PostponeInterruptsScope;
class Script;
class TypeProfile;
@@ -541,6 +542,15 @@ class PostponeInterruptsScope {
std::unique_ptr<i::PostponeInterruptsScope> scope_;
};
+class DisableBreakScope {
+ public:
+ explicit DisableBreakScope(v8::Isolate* isolate);
+ ~DisableBreakScope();
+
+ private:
+ std::unique_ptr<i::DisableBreak> scope_;
+};
+
class WeakMap : public v8::Object {
public:
WeakMap() = delete;
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 28d595853c..ea0f4d3fc9 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -166,7 +166,7 @@ v8::Local<v8::Function> DebugStackTraceIterator::GetFunction() const {
std::unique_ptr<v8::debug::ScopeIterator>
DebugStackTraceIterator::GetScopeIterator() const {
DCHECK(!Done());
- StandardFrame* frame = iterator_.frame();
+ CommonFrame* frame = iterator_.frame();
if (frame->is_wasm()) {
return std::make_unique<DebugWasmScopeIterator>(isolate_,
WasmFrame::cast(frame));
@@ -184,11 +184,25 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::Evaluate(
v8::Local<v8::String> source, bool throw_on_side_effect) {
DCHECK(!Done());
Handle<Object> value;
+
i::SafeForInterruptsScope safe_for_interrupt_scope(isolate_);
- if (!DebugEvaluate::Local(isolate_, iterator_.frame()->id(),
- inlined_frame_index_, Utils::OpenHandle(*source),
- throw_on_side_effect)
- .ToHandle(&value)) {
+ bool success = false;
+ if (iterator_.is_wasm()) {
+ FrameSummary summary = FrameSummary::Get(iterator_.frame(), 0);
+ const FrameSummary::WasmFrameSummary& wasmSummary = summary.AsWasm();
+ Handle<WasmInstanceObject> instance = wasmSummary.wasm_instance();
+
+ success = DebugEvaluate::WebAssembly(instance, iterator_.frame()->id(),
+ Utils::OpenHandle(*source),
+ throw_on_side_effect)
+ .ToHandle(&value);
+ } else {
+ success = DebugEvaluate::Local(
+ isolate_, iterator_.frame()->id(), inlined_frame_index_,
+ Utils::OpenHandle(*source), throw_on_side_effect)
+ .ToHandle(&value);
+ }
+ if (!success) {
isolate_->OptionalRescheduleException(false);
return v8::MaybeLocal<v8::Value>();
}
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index d86a7ff5b8..a65c1b3bfd 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -1041,7 +1041,7 @@ void Debug::PrepareStep(StepAction step_action) {
thread_local_.last_step_action_ = step_action;
StackTraceFrameIterator frames_it(isolate_, frame_id);
- StandardFrame* frame = frames_it.frame();
+ CommonFrame* frame = frames_it.frame();
BreakLocation location = BreakLocation::Invalid();
Handle<SharedFunctionInfo> shared;
@@ -1327,7 +1327,8 @@ void Debug::InstallDebugBreakTrampoline() {
}
} else if (obj.IsJSObject()) {
JSObject object = JSObject::cast(obj);
- DescriptorArray descriptors = object.map().instance_descriptors();
+ DescriptorArray descriptors =
+ object.map().instance_descriptors(kRelaxedLoad);
for (InternalIndex i : object.map().IterateOwnDescriptors()) {
if (descriptors.GetDetails(i).kind() == PropertyKind::kAccessor) {
@@ -1703,7 +1704,8 @@ void Debug::FreeDebugInfoListNode(DebugInfoListNode* prev,
// Pack script back into the
// SFI::script_or_debug_info field.
Handle<DebugInfo> debug_info(node->debug_info());
- debug_info->shared().set_script_or_debug_info(debug_info->script());
+ debug_info->shared().set_script_or_debug_info(debug_info->script(),
+ kReleaseStore);
delete node;
}
@@ -1969,7 +1971,7 @@ bool Debug::ShouldBeSkipped() {
DisableBreak no_recursive_break(this);
StackTraceFrameIterator iterator(isolate_);
- StandardFrame* frame = iterator.frame();
+ CommonFrame* frame = iterator.frame();
FrameSummary summary = FrameSummary::GetTop(frame);
Handle<Object> script_obj = summary.script();
if (!script_obj->IsScript()) return false;
@@ -2135,7 +2137,7 @@ void Debug::PrintBreakLocation() {
HandleScope scope(isolate_);
StackTraceFrameIterator iterator(isolate_);
if (iterator.done()) return;
- StandardFrame* frame = iterator.frame();
+ CommonFrame* frame = iterator.frame();
FrameSummary summary = FrameSummary::GetTop(frame);
summary.EnsureSourcePositionsAvailable();
int source_position = summary.SourcePosition();
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index e62fbab04a..981692c3bb 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -1025,7 +1025,7 @@ void TranslateSourcePositionTable(Isolate* isolate, Handle<BytecodeArray> code,
Handle<ByteArray> new_source_position_table(
builder.ToSourcePositionTable(isolate));
- code->set_synchronized_source_position_table(*new_source_position_table);
+ code->set_source_position_table(*new_source_position_table, kReleaseStore);
LOG_CODE_EVENT(isolate,
CodeLinePosInfoRecordEvent(code->GetFirstBytecodeAddress(),
*new_source_position_table));
diff --git a/deps/v8/src/debug/ppc/OWNERS b/deps/v8/src/debug/ppc/OWNERS
index 6edd45a6ef..02c2cd757c 100644
--- a/deps/v8/src/debug/ppc/OWNERS
+++ b/deps/v8/src/debug/ppc/OWNERS
@@ -2,3 +2,4 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
+vasili.skurydzin@ibm.com
diff --git a/deps/v8/src/debug/wasm/gdb-server/DIR_METADATA b/deps/v8/src/debug/wasm/gdb-server/DIR_METADATA
new file mode 100644
index 0000000000..3b428d9660
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>WebAssembly"
+}
\ No newline at end of file
diff --git a/deps/v8/src/debug/wasm/gdb-server/OWNERS b/deps/v8/src/debug/wasm/gdb-server/OWNERS
index 4b8c1919e8..e2c94e8d24 100644
--- a/deps/v8/src/debug/wasm/gdb-server/OWNERS
+++ b/deps/v8/src/debug/wasm/gdb-server/OWNERS
@@ -1,3 +1 @@
paolosev@microsoft.com
-
-# COMPONENT: Blink>JavaScript>WebAssembly
\ No newline at end of file
diff --git a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
index 5074acbb63..f0b77bc096 100644
--- a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
+++ b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
@@ -98,7 +98,7 @@ std::vector<wasm_addr_t> WasmModuleDebug::GetCallStack(
case StackFrame::WASM: {
// A standard frame may include many summarized frames, due to inlining.
std::vector<FrameSummary> frames;
- StandardFrame::cast(frame)->Summarize(&frames);
+ CommonFrame::cast(frame)->Summarize(&frames);
for (size_t i = frames.size(); i-- != 0;) {
int offset = 0;
Handle<Script> script;
@@ -156,7 +156,7 @@ std::vector<FrameSummary> WasmModuleDebug::FindWasmFrame(
case StackFrame::WASM: {
// A standard frame may include many summarized frames, due to inlining.
std::vector<FrameSummary> frames;
- StandardFrame::cast(frame)->Summarize(&frames);
+ CommonFrame::cast(frame)->Summarize(&frames);
const size_t frame_count = frames.size();
DCHECK_GT(frame_count, 0);
diff --git a/deps/v8/src/deoptimizer/DIR_METADATA b/deps/v8/src/deoptimizer/DIR_METADATA
new file mode 100644
index 0000000000..fc018666b1
--- /dev/null
+++ b/deps/v8/src/deoptimizer/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Compiler"
+}
\ No newline at end of file
diff --git a/deps/v8/src/deoptimizer/OWNERS b/deps/v8/src/deoptimizer/OWNERS
index e4ff70c640..eae6bba0ae 100644
--- a/deps/v8/src/deoptimizer/OWNERS
+++ b/deps/v8/src/deoptimizer/OWNERS
@@ -3,5 +3,3 @@ neis@chromium.org
nicohartmann@chromium.org
sigurds@chromium.org
tebbi@chromium.org
-
-# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
index c04e49282e..27684d9e39 100644
--- a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
@@ -2,246 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
-#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-const int Deoptimizer::kLazyDeoptExitSize = 0;
-
-#define __ masm->
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- // Everything but pc, lr and ip which will be saved but not restored.
- RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
-
- const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kNumRegisters;
-
- // Save all allocatable VFP registers before messing with them.
- {
- // We use a run-time check for VFP32DREGS.
- CpuFeatureScope scope(masm, VFP32DREGS,
- CpuFeatureScope::kDontCheckSupported);
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
-
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(scratch);
-
- // Push registers d0-d15, and possibly d16-d31, on the stack.
- // If d16-d31 are not pushed, decrease the stack pointer instead.
- __ vstm(db_w, sp, d16, d31, ne);
- // Okay to not call AllocateStackSpace here because the size is a known
- // small number and we need to use condition codes.
- __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
- __ vstm(db_w, sp, d0, d15);
- }
-
- // Push all 16 registers (needed to populate FrameDescription::registers_).
- // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
- // handle this a bit differently.
- __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
-
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
- __ mov(scratch, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate)));
- __ str(fp, MemOperand(scratch));
- }
-
- const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
-
- // Get the bailout id is passed as r10 by the caller.
- __ mov(r2, r10);
-
- // Get the address of the location in the code object (r3) (return
- // address for lazy deoptimization) and compute the fp-to-sp delta in
- // register r4.
- __ mov(r3, lr);
- __ add(r4, sp, Operand(kSavedRegistersAreaSize));
- __ sub(r4, fp, r4);
-
- // Allocate a new deoptimizer object.
- // Pass four arguments in r0 to r3 and fifth argument on stack.
- __ PrepareCallCFunction(6);
- __ mov(r0, Operand(0));
- Label context_check;
- __ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(r1, &context_check);
- __ ldr(r0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ mov(r1, Operand(static_cast<int>(deopt_kind)));
- // r2: bailout id already loaded.
- // r3: code address or 0 already loaded.
- __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
- __ mov(r5, Operand(ExternalReference::isolate_address(isolate)));
- __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
- // Call Deoptimizer::New().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
-
- // Preserve "deoptimizer" object in register r0 and get the input
- // frame descriptor pointer to r1 (deoptimizer->input_);
- __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
-
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ ldr(r2, MemOperand(sp, i * kPointerSize));
- __ str(r2, MemOperand(r1, offset));
- }
-
- // Copy VFP registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- int double_regs_offset = FrameDescription::double_registers_offset();
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ vldr(d0, sp, src_offset);
- __ vstr(d0, r1, dst_offset);
- }
-
- // Mark the stack as not iterable for the CPU profiler which won't be able to
- // walk the stack without the return address.
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.Acquire();
- Register zero = r4;
- __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ mov(zero, Operand(0));
- __ strb(zero, MemOperand(is_iterable));
- }
-
- // Remove the saved registers from the stack.
- __ add(sp, sp, Operand(kSavedRegistersAreaSize));
-
- // Compute a pointer to the unwinding limit in register r2; that is
- // the first stack slot not part of the input frame.
- __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
- __ add(r2, r2, sp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
- Label pop_loop;
- Label pop_loop_header;
- __ b(&pop_loop_header);
- __ bind(&pop_loop);
- __ pop(r4);
- __ str(r4, MemOperand(r3, 0));
- __ add(r3, r3, Operand(sizeof(uint32_t)));
- __ bind(&pop_loop_header);
- __ cmp(r2, sp);
- __ b(ne, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(r0); // Preserve deoptimizer object across call.
- // r0: deoptimizer object; r1: scratch.
- __ PrepareCallCFunction(1);
- // Call Deoptimizer::ComputeOutputFrames().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
- }
- __ pop(r0); // Restore deoptimizer object (class Deoptimizer).
-
- __ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
- // Outer loop state: r4 = current "FrameDescription** output_",
- // r1 = one past the last FrameDescription**.
- __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
- __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_.
- __ add(r1, r4, Operand(r1, LSL, 2));
- __ jmp(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
- __ ldr(r2, MemOperand(r4, 0)); // output_[ix]
- __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ sub(r3, r3, Operand(sizeof(uint32_t)));
- __ add(r6, r2, Operand(r3));
- __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
- __ push(r6);
- __ bind(&inner_loop_header);
- __ cmp(r3, Operand::Zero());
- __ b(ne, &inner_push_loop); // test for gt?
- __ add(r4, r4, Operand(kPointerSize));
- __ bind(&outer_loop_header);
- __ cmp(r4, r1);
- __ b(lt, &outer_push_loop);
-
- __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- DwVfpRegister reg = DwVfpRegister::from_code(code);
- int src_offset = code * kDoubleSize + double_regs_offset;
- __ vldr(reg, r1, src_offset);
- }
-
- // Push pc and continuation from the last output frame.
- __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
- __ push(r6);
- __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
- __ push(r6);
-
- // Push the registers from the last output frame.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ ldr(r6, MemOperand(r2, offset));
- __ push(r6);
- }
-
- // Restore the registers from the stack.
- __ ldm(ia_w, sp, restored_regs); // all but pc registers.
-
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.Acquire();
- Register one = r4;
- __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ mov(one, Operand(1));
- __ strb(one, MemOperand(is_iterable));
- }
-
- // Remove sp, lr and pc.
- __ Drop(3);
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
- __ pop(scratch); // get continuation, leave pc on stack
- __ pop(lr);
- __ Jump(scratch);
- }
-
- __ stop();
-}
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
+const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
const int kShift = n % 2 == 0 ? 0 : 32;
@@ -265,7 +33,5 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
index 21a75d024d..c695347a0b 100644
--- a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
@@ -3,12 +3,7 @@
// found in the LICENSE file.
#include "src/api/api.h"
-#include "src/codegen/arm64/assembler-arm64-inl.h"
-#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
-#include "src/execution/frame-constants.h"
#include "src/execution/pointer-authentication.h"
namespace v8 {
@@ -22,286 +17,6 @@ const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 1 * kInstrSize;
#endif
-#define __ masm->
-
-namespace {
-
-void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
- int dst_offset, const CPURegList& reg_list,
- const Register& temp0, const Register& temp1,
- int src_offset = 0) {
- DCHECK_EQ(reg_list.Count() % 2, 0);
- UseScratchRegisterScope temps(masm);
- CPURegList copy_to_input = reg_list;
- int reg_size = reg_list.RegisterSizeInBytes();
- DCHECK_EQ(temp0.SizeInBytes(), reg_size);
- DCHECK_EQ(temp1.SizeInBytes(), reg_size);
-
- // Compute some temporary addresses to avoid having the macro assembler set
- // up a temp with an offset for accesses out of the range of the addressing
- // mode.
- Register src = temps.AcquireX();
- masm->Add(src, sp, src_offset);
- masm->Add(dst, dst, dst_offset);
-
- // Write reg_list into the frame pointed to by dst.
- for (int i = 0; i < reg_list.Count(); i += 2) {
- masm->Ldp(temp0, temp1, MemOperand(src, i * reg_size));
-
- CPURegister reg0 = copy_to_input.PopLowestIndex();
- CPURegister reg1 = copy_to_input.PopLowestIndex();
- int offset0 = reg0.code() * reg_size;
- int offset1 = reg1.code() * reg_size;
-
- // Pair up adjacent stores, otherwise write them separately.
- if (offset1 == offset0 + reg_size) {
- masm->Stp(temp0, temp1, MemOperand(dst, offset0));
- } else {
- masm->Str(temp0, MemOperand(dst, offset0));
- masm->Str(temp1, MemOperand(dst, offset1));
- }
- }
- masm->Sub(dst, dst, dst_offset);
-}
-
-void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
- const Register& src_base, int src_offset) {
- DCHECK_EQ(reg_list.Count() % 2, 0);
- UseScratchRegisterScope temps(masm);
- CPURegList restore_list = reg_list;
- int reg_size = restore_list.RegisterSizeInBytes();
-
- // Compute a temporary addresses to avoid having the macro assembler set
- // up a temp with an offset for accesses out of the range of the addressing
- // mode.
- Register src = temps.AcquireX();
- masm->Add(src, src_base, src_offset);
-
- // No need to restore padreg.
- restore_list.Remove(padreg);
-
- // Restore every register in restore_list from src.
- while (!restore_list.IsEmpty()) {
- CPURegister reg0 = restore_list.PopLowestIndex();
- CPURegister reg1 = restore_list.PopLowestIndex();
- int offset0 = reg0.code() * reg_size;
-
- if (reg1 == NoCPUReg) {
- masm->Ldr(reg0, MemOperand(src, offset0));
- break;
- }
-
- int offset1 = reg1.code() * reg_size;
-
- // Pair up adjacent loads, otherwise read them separately.
- if (offset1 == offset0 + reg_size) {
- masm->Ldp(reg0, reg1, MemOperand(src, offset0));
- } else {
- masm->Ldr(reg0, MemOperand(src, offset0));
- masm->Ldr(reg1, MemOperand(src, offset1));
- }
- }
-}
-} // namespace
-
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // TODO(all): This code needs to be revisited. We probably only need to save
- // caller-saved registers here. Callee-saved registers can be stored directly
- // in the input frame.
-
- // Save all allocatable double registers.
- CPURegList saved_double_registers(
- CPURegister::kVRegister, kDRegSizeInBits,
- RegisterConfiguration::Default()->allocatable_double_codes_mask());
- DCHECK_EQ(saved_double_registers.Count() % 2, 0);
- __ PushCPURegList(saved_double_registers);
-
- // We save all the registers except sp, lr, platform register (x18) and the
- // masm scratches.
- CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 28);
- saved_registers.Remove(ip0);
- saved_registers.Remove(ip1);
- saved_registers.Remove(x18);
- saved_registers.Combine(fp);
- saved_registers.Align();
- DCHECK_EQ(saved_registers.Count() % 2, 0);
- __ PushCPURegList(saved_registers);
-
- __ Mov(x3, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate)));
- __ Str(fp, MemOperand(x3));
-
- const int kSavedRegistersAreaSize =
- (saved_registers.Count() * kXRegSize) +
- (saved_double_registers.Count() * kDRegSize);
-
- // Floating point registers are saved on the stack above core registers.
- const int kDoubleRegistersOffset = saved_registers.Count() * kXRegSize;
-
- // We don't use a bailout id for arm64, because we can compute the id from the
- // address. Pass kMaxUInt32 instead to signify this.
- Register bailout_id = x2;
- __ Mov(bailout_id, kMaxUInt32);
-
- Register code_object = x3;
- Register fp_to_sp = x4;
- // Get the address of the location in the code object. This is the return
- // address for lazy deoptimization.
- __ Mov(code_object, lr);
- // Compute the fp-to-sp delta.
- __ Add(fp_to_sp, sp, kSavedRegistersAreaSize);
- __ Sub(fp_to_sp, fp, fp_to_sp);
-
- // Allocate a new deoptimizer object.
- __ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
-
- // Ensure we can safely load from below fp.
- DCHECK_GT(kSavedRegistersAreaSize, -StandardFrameConstants::kFunctionOffset);
- __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-
- // If x1 is a smi, zero x0.
- __ Tst(x1, kSmiTagMask);
- __ CzeroX(x0, eq);
-
- __ Mov(x1, static_cast<int>(deopt_kind));
- // Following arguments are already loaded:
- // - x2: bailout id
- // - x3: code object address
- // - x4: fp-to-sp delta
- __ Mov(x5, ExternalReference::isolate_address(isolate));
-
- {
- // Call Deoptimizer::New().
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
-
- // Preserve "deoptimizer" object in register x0.
- Register deoptimizer = x0;
-
- // Get the input frame descriptor pointer.
- __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
-
- // Copy core registers into the input frame.
- CopyRegListToFrame(masm, x1, FrameDescription::registers_offset(),
- saved_registers, x2, x3);
-
- // Copy double registers to the input frame.
- CopyRegListToFrame(masm, x1, FrameDescription::double_registers_offset(),
- saved_double_registers, x2, x3, kDoubleRegistersOffset);
-
- // Mark the stack as not iterable for the CPU profiler which won't be able to
- // walk the stack without the return address.
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.AcquireX();
- __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ strb(xzr, MemOperand(is_iterable));
- }
-
- // Remove the saved registers from the stack.
- DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
- __ Drop(kSavedRegistersAreaSize / kXRegSize);
-
- // Compute a pointer to the unwinding limit in register x2; that is
- // the first stack slot not part of the input frame.
- Register unwind_limit = x2;
- __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ Add(x3, x1, FrameDescription::frame_content_offset());
- __ SlotAddress(x1, 0);
- __ Lsr(unwind_limit, unwind_limit, kSystemPointerSizeLog2);
- __ Mov(x5, unwind_limit);
- __ CopyDoubleWords(x3, x1, x5);
- __ Drop(unwind_limit);
-
- // Compute the output frame in the deoptimizer.
- __ Push(padreg, x0); // Preserve deoptimizer object across call.
- {
- // Call Deoptimizer::ComputeOutputFrames().
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
- }
- __ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer).
-
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.AcquireX();
- __ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
- __ Mov(sp, scratch);
- }
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, outer_loop_header;
- __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
- __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
- __ Add(x1, x0, Operand(x1, LSL, kSystemPointerSizeLog2));
- __ B(&outer_loop_header);
-
- __ Bind(&outer_push_loop);
- Register current_frame = x2;
- Register frame_size = x3;
- __ Ldr(current_frame, MemOperand(x0, kSystemPointerSize, PostIndex));
- __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
- __ Lsr(frame_size, x3, kSystemPointerSizeLog2);
- __ Claim(frame_size);
-
- __ Add(x7, current_frame, FrameDescription::frame_content_offset());
- __ SlotAddress(x6, 0);
- __ CopyDoubleWords(x6, x7, frame_size);
-
- __ Bind(&outer_loop_header);
- __ Cmp(x0, x1);
- __ B(lt, &outer_push_loop);
-
- __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
- RestoreRegList(masm, saved_double_registers, x1,
- FrameDescription::double_registers_offset());
-
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.AcquireX();
- Register one = x4;
- __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ Mov(one, Operand(1));
- __ strb(one, MemOperand(is_iterable));
- }
-
- // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
- // stack, then pops it all into registers. Here, we try to load it directly
- // into the relevant registers. Is this correct? If so, we should improve the
- // ARM code.
-
- // Restore registers from the last output frame.
- // Note that lr is not in the list of saved_registers and will be restored
- // later. We can use it to hold the address of last output frame while
- // reloading the other registers.
- DCHECK(!saved_registers.IncludesAliasOf(lr));
- Register last_output_frame = lr;
- __ Mov(last_output_frame, current_frame);
-
- RestoreRegList(masm, saved_registers, last_output_frame,
- FrameDescription::registers_offset());
-
- UseScratchRegisterScope temps(masm);
- temps.Exclude(x17);
- Register continuation = x17;
- __ Ldr(continuation, MemOperand(last_output_frame,
- FrameDescription::continuation_offset()));
- __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
-#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- __ Autibsp();
-#endif
- __ Br(continuation);
-}
-
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
static_cast<uint32_t>(double_registers_[n].get_bits()));
@@ -331,7 +46,5 @@ void FrameDescription::SetPc(intptr_t pc) {
pc_ = pc;
}
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index c2b4d402ee..63b4431128 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -23,11 +23,12 @@
#include "src/interpreter/interpreter.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
+#include "src/objects/arguments.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/smi.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/tracing/trace-event.h"
-#include "torque-generated/exported-class-definitions.h"
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
@@ -103,7 +104,6 @@ class FrameWriter {
void PushStackJSArguments(TranslatedFrame::iterator& iterator,
int parameters_count) {
-#ifdef V8_REVERSE_JSARGS
std::vector<TranslatedFrame::iterator> parameters;
parameters.reserve(parameters_count);
for (int i = 0; i < parameters_count; ++i, ++iterator) {
@@ -112,11 +112,6 @@ class FrameWriter {
for (auto& parameter : base::Reversed(parameters)) {
PushTranslatedValue(parameter, "stack parameter");
}
-#else
- for (int i = 0; i < parameters_count; ++i, ++iterator) {
- PushTranslatedValue(iterator, "stack parameter");
- }
-#endif
}
unsigned top_offset() const { return top_offset_; }
@@ -179,25 +174,6 @@ class FrameWriter {
unsigned top_offset_;
};
-DeoptimizerData::DeoptimizerData(Heap* heap) : heap_(heap), current_(nullptr) {
- Code* start = &deopt_entry_code_[0];
- Code* end = &deopt_entry_code_[DeoptimizerData::kLastDeoptimizeKind + 1];
- strong_roots_entry_ =
- heap_->RegisterStrongRoots(FullObjectSlot(start), FullObjectSlot(end));
-}
-
-DeoptimizerData::~DeoptimizerData() {
- heap_->UnregisterStrongRoots(strong_roots_entry_);
-}
-
-Code DeoptimizerData::deopt_entry_code(DeoptimizeKind kind) {
- return deopt_entry_code_[static_cast<int>(kind)];
-}
-
-void DeoptimizerData::set_deopt_entry_code(DeoptimizeKind kind, Code code) {
- deopt_entry_code_[static_cast<int>(kind)] = code;
-}
-
Code Deoptimizer::FindDeoptimizingCode(Address addr) {
if (function_.IsHeapObject()) {
// Search all deoptimizing code in the native context of the function.
@@ -214,7 +190,7 @@ Code Deoptimizer::FindDeoptimizingCode(Address addr) {
return Code();
}
-// We rely on this function not causing a GC. It is called from generated code
+// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind,
unsigned bailout_id, Address from,
@@ -222,16 +198,13 @@ Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind,
JSFunction function = JSFunction::cast(Object(raw_function));
Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, kind,
bailout_id, from, fp_to_sp_delta);
- CHECK_NULL(isolate->deoptimizer_data()->current_);
- isolate->deoptimizer_data()->current_ = deoptimizer;
+ isolate->set_current_deoptimizer(deoptimizer);
return deoptimizer;
}
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
- Deoptimizer* result = isolate->deoptimizer_data()->current_;
- CHECK_NOT_NULL(result);
+ Deoptimizer* result = isolate->GetAndClearCurrentDeoptimizer();
result->DeleteFrameDescriptions();
- isolate->deoptimizer_data()->current_ = nullptr;
return result;
}
@@ -294,6 +267,7 @@ class ActivationsFinder : public ThreadVisitor {
SafepointEntry safepoint = code.GetSafepointEntry(it.frame()->pc());
int trampoline_pc = safepoint.trampoline_pc();
DCHECK_IMPLIES(code == topmost_, safe_to_deopt_);
+ CHECK_GE(trampoline_pc, 0);
// Replace the current pc on the stack with the trampoline.
// TODO(v8:10026): avoid replacing a signed pointer.
Address* pc_addr = it.frame()->pc_address();
@@ -498,8 +472,6 @@ const char* Deoptimizer::MessageFor(DeoptimizeKind kind, bool reuse_code) {
case DeoptimizeKind::kBailout:
return "bailout";
}
- FATAL("Unsupported deopt kind");
- return nullptr;
}
namespace {
@@ -542,6 +514,9 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
deoptimizing_throw_ = true;
}
+ DCHECK(bailout_id_ == kFixedExitSizeMarker ||
+ bailout_id_ < kMaxNumberOfEntries);
+
DCHECK_NE(from, kNullAddress);
compiled_code_ = FindOptimizedCode();
DCHECK(!compiled_code_.is_null());
@@ -570,7 +545,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
input_ = new (size) FrameDescription(size, parameter_count);
if (kSupportsFixedDeoptExitSizes) {
- DCHECK_EQ(bailout_id_, kMaxUInt32);
+ DCHECK_EQ(bailout_id_, kFixedExitSizeMarker);
// Calculate bailout id from return address.
DCHECK_GT(kNonLazyDeoptExitSize, 0);
DCHECK_GT(kLazyDeoptExitSize, 0);
@@ -582,7 +557,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
Address lazy_deopt_start =
deopt_start + non_lazy_deopt_count * kNonLazyDeoptExitSize;
// The deoptimization exits are sorted so that lazy deopt exits appear last.
- static_assert(DeoptimizeKind::kLazy == DeoptimizeKind::kLastDeoptimizeKind,
+ static_assert(DeoptimizeKind::kLazy == kLastDeoptimizeKind,
"lazy deopts are expected to be emitted last");
// from_ is the value of the link register after the call to the
// deoptimizer, so for the last lazy deopt, from_ points to the first
@@ -641,42 +616,44 @@ void Deoptimizer::DeleteFrameDescriptions() {
#endif // DEBUG
}
-Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
- DeoptimizeKind kind) {
- DeoptimizerData* data = isolate->deoptimizer_data();
- CHECK_LE(kind, DeoptimizerData::kLastDeoptimizeKind);
- CHECK(!data->deopt_entry_code(kind).is_null());
- return data->deopt_entry_code(kind).raw_instruction_start();
-}
-
-bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
- DeoptimizeKind type) {
- DeoptimizerData* data = isolate->deoptimizer_data();
- CHECK_LE(type, DeoptimizerData::kLastDeoptimizeKind);
- Code code = data->deopt_entry_code(type);
- if (code.is_null()) return false;
- return addr == code.raw_instruction_start();
+Builtins::Name Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
+ DeoptimizeKind kind) {
+ switch (kind) {
+ case DeoptimizeKind::kEager:
+ return Builtins::kDeoptimizationEntry_Eager;
+ case DeoptimizeKind::kSoft:
+ return Builtins::kDeoptimizationEntry_Soft;
+ case DeoptimizeKind::kBailout:
+ return Builtins::kDeoptimizationEntry_Bailout;
+ case DeoptimizeKind::kLazy:
+ return Builtins::kDeoptimizationEntry_Lazy;
+ }
}
bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
- DeoptimizeKind* type) {
- if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kEager)) {
- *type = DeoptimizeKind::kEager;
- return true;
- }
- if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kSoft)) {
- *type = DeoptimizeKind::kSoft;
- return true;
- }
- if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kLazy)) {
- *type = DeoptimizeKind::kLazy;
- return true;
- }
- if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kBailout)) {
- *type = DeoptimizeKind::kBailout;
- return true;
+ DeoptimizeKind* type_out) {
+ Code maybe_code = InstructionStream::TryLookupCode(isolate, addr);
+ if (maybe_code.is_null()) return false;
+
+ Code code = maybe_code;
+ switch (code.builtin_index()) {
+ case Builtins::kDeoptimizationEntry_Eager:
+ *type_out = DeoptimizeKind::kEager;
+ return true;
+ case Builtins::kDeoptimizationEntry_Soft:
+ *type_out = DeoptimizeKind::kSoft;
+ return true;
+ case Builtins::kDeoptimizationEntry_Bailout:
+ *type_out = DeoptimizeKind::kBailout;
+ return true;
+ case Builtins::kDeoptimizationEntry_Lazy:
+ *type_out = DeoptimizeKind::kLazy;
+ return true;
+ default:
+ return false;
}
- return false;
+
+ UNREACHABLE();
}
int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
@@ -763,10 +740,10 @@ void Deoptimizer::TraceMarkForDeoptimization(Code code, const char* reason) {
DeoptimizationData deopt_data = DeoptimizationData::cast(maybe_data);
CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[marking dependent code " V8PRIxPTR_FMT " ",
+ PrintF(scope.file(), "[marking dependent code " V8PRIxPTR_FMT " (",
code.ptr());
deopt_data.SharedFunctionInfo().ShortPrint(scope.file());
- PrintF(" (opt id %d) for deoptimization, reason: %s]\n",
+ PrintF(") (opt id %d) for deoptimization, reason: %s]\n",
deopt_data.OptimizationId().value(), reason);
{
AllowHeapAllocation yes_gc;
@@ -824,8 +801,8 @@ void Deoptimizer::TraceDeoptMarked(Isolate* isolate) {
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
// When we call this function, the return address of the previous frame has
- // been removed from the stack by GenerateDeoptimizationEntries() so the stack
- // is not iterable by the SafeStackFrameIterator.
+ // been removed from the stack by the DeoptimizationEntry builtin, so the
+ // stack is not iterable by the SafeStackFrameIterator.
#if V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK
DCHECK_EQ(0, isolate()->isolate_data()->stack_is_iterable());
#endif
@@ -887,7 +864,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// Do the input frame to output frame(s) translation.
size_t count = translated_state_.frames().size();
// If we are supposed to go to the catch handler, find the catching frame
- // for the catch and make sure we only deoptimize upto that frame.
+ // for the catch and make sure we only deoptimize up to that frame.
if (deoptimizing_throw_) {
size_t catch_handler_frame_index = count;
for (size_t i = count; i-- > 0;) {
@@ -986,9 +963,22 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
goto_catch_handler ? catch_handler_pc_offset_ : real_bytecode_offset;
const int parameters_count = InternalFormalParameterCountWithReceiver(shared);
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // If this is the bottommost frame or the previous frame was the arguments
+ // adaptor fake frame, then we already have extra arguments on the stack
+ // (including any extra padding). Therefore we should not try to add any
+ // padding.
+ bool should_pad_arguments =
+ !is_bottommost && (translated_state_.frames()[frame_index - 1]).kind() !=
+ TranslatedFrame::kArgumentsAdaptor;
+#else
+ bool should_pad_arguments = true;
+#endif
+
const int locals_count = translated_frame->height();
- InterpretedFrameInfo frame_info =
- InterpretedFrameInfo::Precise(parameters_count, locals_count, is_topmost);
+ InterpretedFrameInfo frame_info = InterpretedFrameInfo::Precise(
+ parameters_count, locals_count, is_topmost, should_pad_arguments);
const uint32_t output_frame_size = frame_info.frame_size_in_bytes();
TranslatedFrame::iterator function_iterator = value_iterator++;
@@ -1020,9 +1010,10 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// Compute the incoming parameter translation.
ReadOnlyRoots roots(isolate());
- if (ShouldPadArguments(parameters_count)) {
+ if (should_pad_arguments && ShouldPadArguments(parameters_count)) {
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
+
// Note: parameters_count includes the receiver.
if (verbose_tracing_enabled() && is_bottommost &&
actual_argument_count_ > parameters_count - 1) {
@@ -1032,7 +1023,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
}
frame_writer.PushStackJSArguments(value_iterator, parameters_count);
- DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(),
+ DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(should_pad_arguments),
frame_writer.top_offset());
if (verbose_tracing_enabled()) {
PrintF(trace_scope()->file(), " -------------------------\n");
@@ -1218,7 +1209,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
: builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
if (is_topmost) {
// Only the pc of the topmost frame needs to be signed since it is
- // authenticated at the end of GenerateDeoptimizationEntries.
+ // authenticated at the end of the DeoptimizationEntry builtin.
const intptr_t top_most_pc = PointerAuthentication::SignAndCheckPC(
static_cast<intptr_t>(dispatch_builtin.InstructionStart()),
frame_writer.frame()->GetTop());
@@ -1274,9 +1265,13 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
translated_frame->raw_shared_info().internal_formal_parameter_count();
const int extra_argument_count =
argument_count_without_receiver - formal_parameter_count;
-
+ // The number of pushed arguments is the maximum of the actual argument count
+ // and the formal parameter count, plus one for the receiver.
+ const bool should_pad_args = ShouldPadArguments(
+ std::max(argument_count_without_receiver, formal_parameter_count) + 1);
const int output_frame_size =
- std::max(0, extra_argument_count * kSystemPointerSize);
+ std::max(0, extra_argument_count * kSystemPointerSize) +
+ (should_pad_args ? kSystemPointerSize : 0);
if (verbose_tracing_enabled()) {
PrintF(trace_scope_->file(),
" translating arguments adaptor => variable_size=%d\n",
@@ -1296,14 +1291,14 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
output_frame->SetFp(output_[frame_index - 1]->GetFp());
output_[frame_index] = output_frame;
- if (extra_argument_count > 0) {
- FrameWriter frame_writer(this, output_frame, verbose_trace_scope());
+ FrameWriter frame_writer(this, output_frame, verbose_trace_scope());
- ReadOnlyRoots roots(isolate());
- if (ShouldPadArguments(extra_argument_count)) {
- frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
- }
+ ReadOnlyRoots roots(isolate());
+ if (should_pad_args) {
+ frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
+ }
+ if (extra_argument_count > 0) {
// The receiver and arguments with index below the formal parameter
// count are in the fake adaptor frame, because they are used to create the
// arguments object. We should however not push them, since the interpreter
@@ -1545,7 +1540,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
intptr_t pc_value = static_cast<intptr_t>(start + pc_offset);
if (is_topmost) {
// Only the pc of the topmost frame needs to be signed since it is
- // authenticated at the end of GenerateDeoptimizationEntries.
+ // authenticated at the end of the DeoptimizationEntry builtin.
output_frame->SetPc(PointerAuthentication::SignAndCheckPC(
pc_value, frame_writer.frame()->GetTop()));
} else {
@@ -1765,7 +1760,6 @@ void Deoptimizer::DoComputeBuiltinContinuation(
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
-#ifdef V8_REVERSE_JSARGS
if (mode == BuiltinContinuationMode::STUB) {
DCHECK_EQ(Builtins::CallInterfaceDescriptorFor(builtin_name)
.GetStackArgumentOrder(),
@@ -1805,34 +1799,6 @@ void Deoptimizer::DoComputeBuiltinContinuation(
frame_writer.PushStackJSArguments(
value_iterator, frame_info.translated_stack_parameter_count());
}
-#else
- for (uint32_t i = 0; i < frame_info.translated_stack_parameter_count();
- ++i, ++value_iterator) {
- frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
- }
-
- switch (mode) {
- case BuiltinContinuationMode::STUB:
- break;
- case BuiltinContinuationMode::JAVASCRIPT:
- break;
- case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: {
- frame_writer.PushRawObject(roots.the_hole_value(),
- "placeholder for exception on lazy deopt\n");
- } break;
- case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: {
- intptr_t accumulator_value =
- input_->GetRegister(kInterpreterAccumulatorRegister.code());
- frame_writer.PushRawObject(Object(accumulator_value),
- "exception (from accumulator)\n");
- } break;
- }
-
- if (frame_info.frame_has_result_stack_slot()) {
- frame_writer.PushRawObject(roots.the_hole_value(),
- "placeholder for return result on lazy deopt\n");
- }
-#endif
DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(),
frame_writer.top_offset());
@@ -1975,7 +1941,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
mode, frame_info.frame_has_result_stack_slot()));
if (is_topmost) {
// Only the pc of the topmost frame needs to be signed since it is
- // authenticated at the end of GenerateDeoptimizationEntries.
+ // authenticated at the end of the DeoptimizationEntry builtin.
const intptr_t top_most_pc = PointerAuthentication::SignAndCheckPC(
static_cast<intptr_t>(continue_to_builtin.InstructionStart()),
frame_writer.frame()->GetTop());
@@ -2068,43 +2034,12 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
// static
unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) {
int parameter_slots = InternalFormalParameterCountWithReceiver(shared);
+#ifndef V8_NO_ARGUMENTS_ADAPTOR
if (ShouldPadArguments(parameter_slots)) parameter_slots++;
+#endif
return parameter_slots * kSystemPointerSize;
}
-void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
- DeoptimizeKind kind) {
- CHECK(kind == DeoptimizeKind::kEager || kind == DeoptimizeKind::kSoft ||
- kind == DeoptimizeKind::kLazy || kind == DeoptimizeKind::kBailout);
- DeoptimizerData* data = isolate->deoptimizer_data();
- if (!data->deopt_entry_code(kind).is_null()) return;
-
- MacroAssembler masm(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(16 * KB));
- masm.set_emit_debug_code(false);
- GenerateDeoptimizationEntries(&masm, masm.isolate(), kind);
- CodeDesc desc;
- masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
-
- // Allocate the code as immovable since the entry addresses will be used
- // directly and there is no support for relocating them.
- Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::STUB)
- .set_immovable()
- .Build();
- CHECK(isolate->heap()->IsImmovable(*code));
-
- CHECK(data->deopt_entry_code(kind).is_null());
- data->set_deopt_entry_code(kind, *code);
-}
-
-void Deoptimizer::EnsureCodeForDeoptimizationEntries(Isolate* isolate) {
- EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kEager);
- EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kLazy);
- EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kSoft);
- EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kBailout);
-}
-
FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
: frame_size_(frame_size),
parameter_count_(parameter_count),
@@ -3182,25 +3117,19 @@ void TranslatedState::CreateArgumentsElementsTranslatedValues(
if (type == CreateArgumentsType::kMappedArguments) {
// If the actual number of arguments is less than the number of formal
// parameters, we have fewer holes to fill to not overshoot the length.
- number_of_holes = Min(formal_parameter_count_, length);
+ number_of_holes = std::min(formal_parameter_count_, length);
}
for (int i = 0; i < number_of_holes; ++i) {
frame.Add(TranslatedValue::NewTagged(this, roots.the_hole_value()));
}
int argc = length - number_of_holes;
-#ifdef V8_REVERSE_JSARGS
int start_index = number_of_holes;
if (type == CreateArgumentsType::kRestParameter) {
start_index = std::max(0, formal_parameter_count_);
}
-#endif
for (int i = 0; i < argc; i++) {
// Skip the receiver.
-#ifdef V8_REVERSE_JSARGS
int offset = i + start_index + 1;
-#else
- int offset = argc - i - 1;
-#endif
#ifdef V8_NO_ARGUMENTS_ADAPTOR
Address arguments_frame = offset > formal_parameter_count_
? stack_frame_pointer_
@@ -3556,12 +3485,12 @@ TranslatedState::TranslatedState(const JavaScriptFrame* frame) {
DCHECK(!data.is_null() && deopt_index != Safepoint::kNoDeoptimizationIndex);
TranslationIterator it(data.TranslationByteArray(),
data.TranslationIndex(deopt_index).value());
-#ifdef V8_NO_ARGUMENT_ADAPTOR
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
int actual_argc = frame->GetActualArgumentCount();
#else
int actual_argc = 0;
#endif
- Init(frame->isolate(), frame->fp(), kNullAddress, &it, data.LiteralArray(),
+ Init(frame->isolate(), frame->fp(), frame->fp(), &it, data.LiteralArray(),
nullptr /* registers */, nullptr /* trace file */,
frame->function().shared().internal_formal_parameter_count(),
actual_argc);
@@ -3999,7 +3928,8 @@ void TranslatedState::EnsurePropertiesAllocatedAndMarked(
properties_slot->set_storage(object_storage);
// Set markers for out-of-object properties.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate());
for (InternalIndex i : map->IterateOwnDescriptors()) {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
Representation representation = descriptors->GetDetails(i).representation();
@@ -4032,7 +3962,8 @@ void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
Handle<ByteArray> object_storage = AllocateStorageFor(slot);
// Now we handle the interesting (JSObject) case.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate());
// Set markers for in-object properties.
for (InternalIndex i : map->IterateOwnDescriptors()) {
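The deoptimizer.cc changes above complete the move to fixed-size deoptimization exits: Deoptimizer::New is now always invoked with kFixedExitSizeMarker as the bailout id, and the constructor recovers the real id from the return address. For illustration only, a self-contained sketch of that recovery under assumed names (uintptr_t stands in for Address; the helper itself is hypothetical, not the literal V8 code):

#include <cstdint>

// Illustrative sketch: exits are laid out contiguously, non-lazy first and
// lazy last (as the static_assert above requires), and `from` points just
// past the exit that was taken, so the index falls out of simple arithmetic.
unsigned BailoutIdFromReturnAddress(uintptr_t from, uintptr_t deopt_start,
                                    unsigned non_lazy_count,
                                    unsigned non_lazy_exit_size,
                                    unsigned lazy_exit_size) {
  uintptr_t lazy_start = deopt_start + non_lazy_count * non_lazy_exit_size;
  if (from <= lazy_start) {
    return static_cast<unsigned>((from - deopt_start) / non_lazy_exit_size) - 1;
  }
  return non_lazy_count +
         static_cast<unsigned>((from - lazy_start) / lazy_exit_size) - 1;
}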
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index 152e5e510e..df13009acd 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -500,12 +500,13 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
- static Address GetDeoptimizationEntry(Isolate* isolate, DeoptimizeKind kind);
+ V8_EXPORT_PRIVATE static Builtins::Name GetDeoptimizationEntry(
+ Isolate* isolate, DeoptimizeKind kind);
// Returns true if {addr} is a deoptimization entry and stores its type in
- // {type}. Returns false if {addr} is not a deoptimization entry.
+ // {type_out}. Returns false if {addr} is not a deoptimization entry.
static bool IsDeoptimizationEntry(Isolate* isolate, Address addr,
- DeoptimizeKind* type);
+ DeoptimizeKind* type_out);
// Code generation support.
static int input_offset() { return offsetof(Deoptimizer, input_); }
@@ -520,25 +521,26 @@ class Deoptimizer : public Malloced {
V8_EXPORT_PRIVATE static int GetDeoptimizedCodeCount(Isolate* isolate);
- static const int kNotDeoptimizationEntry = -1;
-
- static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
- DeoptimizeKind kind);
- static void EnsureCodeForDeoptimizationEntries(Isolate* isolate);
-
Isolate* isolate() const { return isolate_; }
- static const int kMaxNumberOfEntries = 16384;
+ static constexpr int kMaxNumberOfEntries = 16384;
+
+ // This marker is passed to Deoptimizer::New as {bailout_id} on platforms
+ // that have fixed deopt sizes (see also kSupportsFixedDeoptExitSizes). The
+ // actual deoptimization id is then calculated from the return address.
+ static constexpr unsigned kFixedExitSizeMarker = kMaxUInt32;
// Set to true when the architecture supports deoptimization exit sequences
// of a fixed size, that can be sorted so that the deoptimization index is
// deduced from the address of the deoptimization exit.
- static const bool kSupportsFixedDeoptExitSizes;
+ // TODO(jgruber): Remove this, and support for variable deopt exit sizes,
+ // once all architectures use fixed exit sizes.
+ V8_EXPORT_PRIVATE static const bool kSupportsFixedDeoptExitSizes;
// Size of deoptimization exit sequence. This is only meaningful when
// kSupportsFixedDeoptExitSizes is true.
- static const int kNonLazyDeoptExitSize;
- static const int kLazyDeoptExitSize;
+ V8_EXPORT_PRIVATE static const int kNonLazyDeoptExitSize;
+ V8_EXPORT_PRIVATE static const int kLazyDeoptExitSize;
// Tracing.
static void TraceMarkForDeoptimization(Code code, const char* reason);
@@ -555,9 +557,6 @@ class Deoptimizer : public Malloced {
Code FindOptimizedCode();
void DeleteFrameDescriptions();
- static bool IsDeoptimizationEntry(Isolate* isolate, Address addr,
- DeoptimizeKind type);
-
void DoComputeOutputFrames();
void DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
int frame_index, bool goto_catch_handler);
@@ -579,10 +578,6 @@ class Deoptimizer : public Malloced {
static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo shared);
static unsigned ComputeOutgoingArgumentSize(Code code, unsigned bailout_id);
- static void GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind kind);
-
static void MarkAllCodeForContext(NativeContext native_context);
static void DeoptimizeMarkedCodeForContext(NativeContext native_context);
// Searches the list of known deoptimizing code for a Code object
@@ -717,15 +712,23 @@ class FrameDescription {
return *GetFrameSlotPointer(offset);
}
- unsigned GetLastArgumentSlotOffset() {
+ unsigned GetLastArgumentSlotOffset(bool pad_arguments = true) {
int parameter_slots = parameter_count();
- if (ShouldPadArguments(parameter_slots)) parameter_slots++;
+ if (pad_arguments && ShouldPadArguments(parameter_slots)) parameter_slots++;
return GetFrameSize() - parameter_slots * kSystemPointerSize;
}
Address GetFramePointerAddress() {
- int fp_offset =
- GetLastArgumentSlotOffset() - StandardFrameConstants::kCallerSPOffset;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // We should not pad arguments in the bottom frame, since it already
+ // contains padding if necessary, and it might contain extra
+ // arguments (actual argument count > parameter count).
+ const bool pad_arguments_bottom_frame = false;
+#else
+ const bool pad_arguments_bottom_frame = true;
+#endif
+ int fp_offset = GetLastArgumentSlotOffset(pad_arguments_bottom_frame) -
+ StandardFrameConstants::kCallerSPOffset;
return reinterpret_cast<Address>(GetFrameSlotPointer(fp_offset));
}
@@ -779,7 +782,7 @@ class FrameDescription {
return offsetof(FrameDescription, register_values_.registers_);
}
- static int double_registers_offset() {
+ static constexpr int double_registers_offset() {
return offsetof(FrameDescription, register_values_.double_registers_);
}
@@ -827,36 +830,6 @@ class FrameDescription {
}
};
-class DeoptimizerData {
- public:
- explicit DeoptimizerData(Heap* heap);
- ~DeoptimizerData();
-
-#ifdef DEBUG
- bool IsDeoptEntryCode(Code code) const {
- for (int i = 0; i < kLastDeoptimizeKind + 1; i++) {
- if (code == deopt_entry_code_[i]) return true;
- }
- return false;
- }
-#endif // DEBUG
-
- private:
- Heap* heap_;
- static const int kLastDeoptimizeKind =
- static_cast<int>(DeoptimizeKind::kLastDeoptimizeKind);
- Code deopt_entry_code_[kLastDeoptimizeKind + 1];
- Code deopt_entry_code(DeoptimizeKind kind);
- void set_deopt_entry_code(DeoptimizeKind kind, Code code);
-
- Deoptimizer* current_;
- StrongRootsEntry* strong_roots_entry_;
-
- friend class Deoptimizer;
-
- DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
-};
-
class TranslationBuffer {
public:
explicit TranslationBuffer(Zone* zone) : contents_(zone) {}
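The header changes thread a pad_arguments flag through the frame-layout helpers so that, under V8_NO_ARGUMENTS_ADAPTOR, the bottommost frame (which already carries the caller's padding and any extra arguments) is not padded a second time. A minimal sketch of the offset rule, with an assumed 8-byte slot size and a stand-in padding predicate rather than the real V8 helpers:

#include <cstdint>

constexpr uint32_t kSlotSize = 8;  // assumed system pointer size

// Stand-in for the architecture-specific predicate: pad when the slot count
// is odd so the frame stays 16-byte aligned.
bool NeedsPadding(int parameter_slots) { return parameter_slots % 2 != 0; }

// Mirrors FrameDescription::GetLastArgumentSlotOffset above: an extra slot
// is reserved only when padding is both requested and needed.
uint32_t LastArgumentSlotOffset(uint32_t frame_size, int parameter_slots,
                                bool pad_arguments) {
  if (pad_arguments && NeedsPadding(parameter_slots)) parameter_slots++;
  return frame_size - parameter_slots * kSlotSize;
}

With pad_arguments == false, the padding decision already baked into the bottom frame by its caller is respected instead of being recomputed.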
diff --git a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
index 19be03c1e3..4fcb22c209 100644
--- a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
@@ -4,201 +4,14 @@
#if V8_TARGET_ARCH_IA32
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
-#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-const int Deoptimizer::kLazyDeoptExitSize = 0;
-
-#define __ masm->
-
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
- __ AllocateStackSpace(kDoubleRegsSize);
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- XMMRegister xmm_reg = XMMRegister::from_code(code);
- int offset = code * kDoubleSize;
- __ movsd(Operand(esp, offset), xmm_reg);
- }
-
- __ pushad();
-
- ExternalReference c_entry_fp_address =
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate);
- __ mov(masm->ExternalReferenceAsOperand(c_entry_fp_address, esi), ebp);
-
- const int kSavedRegistersAreaSize =
- kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
-
- // The bailout id is passed in ebx by the caller.
-
- // Get the address of the location in the code object
- // and compute the fp-to-sp delta in register edx.
- __ mov(ecx, Operand(esp, kSavedRegistersAreaSize));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kSystemPointerSize));
-
- __ sub(edx, ebp);
- __ neg(edx);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, eax);
- __ mov(eax, Immediate(0));
- Label context_check;
- __ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(edi, &context_check);
- __ mov(eax, Operand(ebp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ mov(Operand(esp, 0 * kSystemPointerSize), eax); // Function.
- __ mov(Operand(esp, 1 * kSystemPointerSize),
- Immediate(static_cast<int>(deopt_kind)));
- __ mov(Operand(esp, 2 * kSystemPointerSize), ebx); // Bailout id.
- __ mov(Operand(esp, 3 * kSystemPointerSize), ecx); // Code address or 0.
- __ mov(Operand(esp, 4 * kSystemPointerSize), edx); // Fp-to-sp delta.
- __ mov(Operand(esp, 5 * kSystemPointerSize),
- Immediate(ExternalReference::isolate_address(isolate)));
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
-
- // Preserve deoptimizer object in register eax and get the input
- // frame descriptor pointer.
- __ mov(esi, Operand(eax, Deoptimizer::input_offset()));
-
- // Fill in the input registers.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(esi, offset));
- }
-
- int double_regs_offset = FrameDescription::double_registers_offset();
- // Fill in the double input registers.
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset = code * kDoubleSize;
- __ movsd(xmm0, Operand(esp, src_offset));
- __ movsd(Operand(esi, dst_offset), xmm0);
- }
-
- // Clear FPU all exceptions.
- // TODO(ulan): Find out why the TOP register is not zero here in some cases,
- // and check that the generated code never deoptimizes with unbalanced stack.
- __ fnclex();
-
- // Mark the stack as not iterable for the CPU profiler which won't be able to
- // walk the stack without the return address.
- __ mov_b(__ ExternalReferenceAsOperand(
- ExternalReference::stack_is_iterable_address(isolate), edx),
- Immediate(0));
-
- // Remove the return address and the double registers.
- __ add(esp, Immediate(kDoubleRegsSize + 1 * kSystemPointerSize));
-
- // Compute a pointer to the unwinding limit in register ecx; that is
- // the first stack slot not part of the input frame.
- __ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
- __ add(ecx, esp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ lea(edx, Operand(esi, FrameDescription::frame_content_offset()));
- Label pop_loop_header;
- __ jmp(&pop_loop_header);
- Label pop_loop;
- __ bind(&pop_loop);
- __ pop(Operand(edx, 0));
- __ add(edx, Immediate(sizeof(uint32_t)));
- __ bind(&pop_loop_header);
- __ cmp(ecx, esp);
- __ j(not_equal, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(eax);
- __ PrepareCallCFunction(1, esi);
- __ mov(Operand(esp, 0 * kSystemPointerSize), eax);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
- }
- __ pop(eax);
-
- __ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
- // Outer loop state: eax = current FrameDescription**, edx = one
- // past the last FrameDescription**.
- __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
- __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
- __ lea(edx, Operand(eax, edx, times_system_pointer_size, 0));
- __ jmp(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: esi = current FrameDescription*, ecx = loop
- // index.
- __ mov(esi, Operand(eax, 0));
- __ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ sub(ecx, Immediate(sizeof(uint32_t)));
- __ push(Operand(esi, ecx, times_1, FrameDescription::frame_content_offset()));
- __ bind(&inner_loop_header);
- __ test(ecx, ecx);
- __ j(not_zero, &inner_push_loop);
- __ add(eax, Immediate(kSystemPointerSize));
- __ bind(&outer_loop_header);
- __ cmp(eax, edx);
- __ j(below, &outer_push_loop);
-
- // In case of a failed STUB, we have to restore the XMM registers.
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- XMMRegister xmm_reg = XMMRegister::from_code(code);
- int src_offset = code * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(esi, src_offset));
- }
-
- // Push pc and continuation from the last output frame.
- __ push(Operand(esi, FrameDescription::pc_offset()));
- __ push(Operand(esi, FrameDescription::continuation_offset()));
-
- // Push the registers from the last output frame.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(esi, offset));
- }
-
- __ mov_b(__ ExternalReferenceAsOperand(
- ExternalReference::stack_is_iterable_address(isolate), edx),
- Immediate(1));
-
- // Restore the registers from the stack.
- __ popad();
-
- __ InitializeRootRegister();
-
- // Return to the continuation point.
- __ ret(0);
-}
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 5;
+const int Deoptimizer::kLazyDeoptExitSize = 5;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
@@ -220,8 +33,6 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-#undef __
-
} // namespace internal
} // namespace v8
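The hand-written ia32 entry generator is deleted because the deoptimization entry is now a builtin (Builtins::kDeoptimizationEntry_*, resolved via Deoptimizer::GetDeoptimizationEntry above), and ia32 switches to fixed exit sizes. The 5-byte constants match the assumption that each exit is a single near call (opcode 0xE8 plus a 32-bit displacement). A hedged sketch of what fixed sizes buy, with assumed names and the encoding size as an assumption:

#include <cstdint>

constexpr int kCallRel32Size = 5;  // assumed ia32 near-call encoding

// Illustration only: with fixed-size exits, the extent of the exit section
// follows from the exit counts alone.
int DeoptExitSectionSize(int non_lazy_count, int lazy_count) {
  return (non_lazy_count + lazy_count) * kCallRel32Size;
}

// `from` points just past the call, so it lies in (start, start + size].
bool ReturnAddressInExitSection(uintptr_t from, uintptr_t section_start,
                                int non_lazy_count, int lazy_count) {
  int size = DeoptExitSectionSize(non_lazy_count, lazy_count);
  return from > section_start && from <= section_start + size;
}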
diff --git a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
index 80221c5cbe..532f7a9b54 100644
--- a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
@@ -2,215 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-const int Deoptimizer::kLazyDeoptExitSize = 0;
-
-#define __ masm->
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // Unlike on ARM we don't save all the registers, just the useful ones.
- // For the rest, there are gaps on the stack, so the offsets remain the same.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- RegList restored_regs = kJSCallerSaved | kCalleeSaved;
- RegList saved_regs = restored_regs | sp.bit() | ra.bit();
-
- const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
-
- // Save all FPU registers before messing with them.
- __ Subu(sp, sp, Operand(kDoubleRegsSize));
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
- int offset = code * kDoubleSize;
- __ Sdc1(fpu_reg, MemOperand(sp, offset));
- }
-
- // Push saved_regs (needed to populate FrameDescription::registers_).
- // Leave gaps for other registers.
- __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
- for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
- if ((saved_regs & (1 << i)) != 0) {
- __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
- }
- }
-
- __ li(a2, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate)));
- __ sw(fp, MemOperand(a2));
-
- const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
-
- // Get the bailout id is passed as kRootRegister by the caller.
- __ mov(a2, kRootRegister);
-
- // Get the address of the location in the code object (a3) (return
- // address for lazy deoptimization) and compute the fp-to-sp delta in
- // register t0.
- __ mov(a3, ra);
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize));
- __ Subu(t0, fp, t0);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, t1);
- // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
- __ mov(a0, zero_reg);
- Label context_check;
- __ lw(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(a1, &context_check);
- __ lw(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ li(a1, Operand(static_cast<int>(deopt_kind)));
- // a2: bailout id already loaded.
- // a3: code address or 0 already loaded.
- __ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
- __ li(t1, Operand(ExternalReference::isolate_address(isolate)));
- __ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
- // Call Deoptimizer::New().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
-
- // Preserve "deoptimizer" object in register v0 and get the input
- // frame descriptor pointer to a1 (deoptimizer->input_);
- // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
- __ mov(a0, v0);
- __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
-
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((saved_regs & (1 << i)) != 0) {
- __ lw(a2, MemOperand(sp, i * kPointerSize));
- __ sw(a2, MemOperand(a1, offset));
- } else if (FLAG_debug_code) {
- __ li(a2, kDebugZapValue);
- __ sw(a2, MemOperand(a1, offset));
- }
- }
-
- int double_regs_offset = FrameDescription::double_registers_offset();
- // Copy FPU registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset =
- code * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ Ldc1(f0, MemOperand(sp, src_offset));
- __ Sdc1(f0, MemOperand(a1, dst_offset));
- }
-
- // Remove the saved registers from the stack.
- __ Addu(sp, sp, Operand(kSavedRegistersAreaSize));
-
- // Compute a pointer to the unwinding limit in register a2; that is
- // the first stack slot not part of the input frame.
- __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
- __ Addu(a2, a2, sp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
- Label pop_loop;
- Label pop_loop_header;
- __ BranchShort(&pop_loop_header);
- __ bind(&pop_loop);
- __ pop(t0);
- __ sw(t0, MemOperand(a3, 0));
- __ addiu(a3, a3, sizeof(uint32_t));
- __ bind(&pop_loop_header);
- __ BranchShort(&pop_loop, ne, a2, Operand(sp));
-
- // Compute the output frame in the deoptimizer.
- __ push(a0); // Preserve deoptimizer object across call.
- // a0: deoptimizer object; a1: scratch.
- __ PrepareCallCFunction(1, a1);
- // Call Deoptimizer::ComputeOutputFrames().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
- }
- __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
-
- __ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
- // Outer loop state: t0 = current "FrameDescription** output_",
- // a1 = one past the last FrameDescription**.
- __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
- __ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_.
- __ Lsa(a1, t0, a1, kPointerSizeLog2);
- __ BranchShort(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
- __ lw(a2, MemOperand(t0, 0)); // output_[ix]
- __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
- __ BranchShort(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ Subu(a3, a3, Operand(sizeof(uint32_t)));
- __ Addu(t2, a2, Operand(a3));
- __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
- __ push(t3);
- __ bind(&inner_loop_header);
- __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
-
- __ Addu(t0, t0, Operand(kPointerSize));
- __ bind(&outer_loop_header);
- __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
-
- __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
- int src_offset = code * kDoubleSize + double_regs_offset;
- __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
- }
-
- // Push pc and continuation from the last output frame.
- __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
- __ push(t2);
- __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
- __ push(t2);
-
- // Technically restoring 'at' should work unless zero_reg is also restored
- // but it's safer to check for this.
- DCHECK(!(at.bit() & restored_regs));
- // Restore the registers from the last output frame.
- __ mov(at, a2);
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
- __ lw(ToRegister(i), MemOperand(at, offset));
- }
- }
-
- __ pop(at); // Get continuation, leave pc on stack.
- __ pop(ra);
- __ Jump(at);
- __ stop();
-}
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 3 * kInstrSize;
+const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
// Maximum size of a table entry generated below.
#ifdef _MIPS_ARCH_MIPS32R6
@@ -239,7 +38,5 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
index 3b8b1b9659..227c002b88 100644
--- a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
@@ -2,215 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-const int Deoptimizer::kLazyDeoptExitSize = 0;
-
-#define __ masm->
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // Unlike on ARM we don't save all the registers, just the useful ones.
- // For the rest, there are gaps on the stack, so the offsets remain the same.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- RegList restored_regs = kJSCallerSaved | kCalleeSaved;
- RegList saved_regs = restored_regs | sp.bit() | ra.bit();
-
- const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
-
- // Save all double FPU registers before messing with them.
- __ Dsubu(sp, sp, Operand(kDoubleRegsSize));
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
- int offset = code * kDoubleSize;
- __ Sdc1(fpu_reg, MemOperand(sp, offset));
- }
-
- // Push saved_regs (needed to populate FrameDescription::registers_).
- // Leave gaps for other registers.
- __ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
- for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
- if ((saved_regs & (1 << i)) != 0) {
- __ Sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
- }
- }
-
- __ li(a2, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate)));
- __ Sd(fp, MemOperand(a2));
-
- const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
-
- // Get the bailout is passed as kRootRegister by the caller.
- __ mov(a2, kRootRegister);
-
- // Get the address of the location in the code object (a3) (return
- // address for lazy deoptimization) and compute the fp-to-sp delta in
- // register a4.
- __ mov(a3, ra);
- __ Daddu(a4, sp, Operand(kSavedRegistersAreaSize));
-
- __ Dsubu(a4, fp, a4);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, a5);
- // Pass six arguments, according to n64 ABI.
- __ mov(a0, zero_reg);
- Label context_check;
- __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(a1, &context_check);
- __ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ li(a1, Operand(static_cast<int>(deopt_kind)));
- // a2: bailout id already loaded.
- // a3: code address or 0 already loaded.
- // a4: already has fp-to-sp delta.
- __ li(a5, Operand(ExternalReference::isolate_address(isolate)));
-
- // Call Deoptimizer::New().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
-
- // Preserve "deoptimizer" object in register v0 and get the input
- // frame descriptor pointer to a1 (deoptimizer->input_);
- // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
- __ mov(a0, v0);
- __ Ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
-
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((saved_regs & (1 << i)) != 0) {
- __ Ld(a2, MemOperand(sp, i * kPointerSize));
- __ Sd(a2, MemOperand(a1, offset));
- } else if (FLAG_debug_code) {
- __ li(a2, kDebugZapValue);
- __ Sd(a2, MemOperand(a1, offset));
- }
- }
-
- int double_regs_offset = FrameDescription::double_registers_offset();
- // Copy FPU registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset =
- code * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ Ldc1(f0, MemOperand(sp, src_offset));
- __ Sdc1(f0, MemOperand(a1, dst_offset));
- }
-
- // Remove the saved registers from the stack.
- __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize));
-
- // Compute a pointer to the unwinding limit in register a2; that is
- // the first stack slot not part of the input frame.
- __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
- __ Daddu(a2, a2, sp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ Daddu(a3, a1, Operand(FrameDescription::frame_content_offset()));
- Label pop_loop;
- Label pop_loop_header;
- __ BranchShort(&pop_loop_header);
- __ bind(&pop_loop);
- __ pop(a4);
- __ Sd(a4, MemOperand(a3, 0));
- __ daddiu(a3, a3, sizeof(uint64_t));
- __ bind(&pop_loop_header);
- __ BranchShort(&pop_loop, ne, a2, Operand(sp));
- // Compute the output frame in the deoptimizer.
- __ push(a0); // Preserve deoptimizer object across call.
- // a0: deoptimizer object; a1: scratch.
- __ PrepareCallCFunction(1, a1);
- // Call Deoptimizer::ComputeOutputFrames().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
- }
- __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
-
- __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
- // Outer loop state: a4 = current "FrameDescription** output_",
- // a1 = one past the last FrameDescription**.
- __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
- __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
- __ Dlsa(a1, a4, a1, kPointerSizeLog2);
- __ BranchShort(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
- __ Ld(a2, MemOperand(a4, 0)); // output_[ix]
- __ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
- __ BranchShort(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
- __ Daddu(a6, a2, Operand(a3));
- __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
- __ push(a7);
- __ bind(&inner_loop_header);
- __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
-
- __ Daddu(a4, a4, Operand(kPointerSize));
- __ bind(&outer_loop_header);
- __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
-
- __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
- int src_offset = code * kDoubleSize + double_regs_offset;
- __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
- }
-
- // Push pc and continuation from the last output frame.
- __ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
- __ push(a6);
- __ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
- __ push(a6);
-
- // Technically restoring 'at' should work unless zero_reg is also restored
- // but it's safer to check for this.
- DCHECK(!(at.bit() & restored_regs));
- // Restore the registers from the last output frame.
- __ mov(at, a2);
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
- __ Ld(ToRegister(i), MemOperand(at, offset));
- }
- }
-
- __ pop(at); // Get continuation, leave pc on stack.
- __ pop(ra);
- __ Jump(at);
- __ stop();
-}
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 3 * kInstrSize;
+const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
// Maximum size of a table entry generated below.
#ifdef _MIPS_ARCH_MIPS64R6
@@ -239,7 +38,5 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
index f8959752b7..817c301431 100644
--- a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
@@ -11,238 +11,9 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-const int Deoptimizer::kLazyDeoptExitSize = 0;
-
-#define __ masm->
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // Unlike on ARM we don't save all the registers, just the useful ones.
- // For the rest, there are gaps on the stack, so the offsets remain the same.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- RegList restored_regs = kJSCallerSaved | kCalleeSaved;
- RegList saved_regs = restored_regs | sp.bit();
-
- const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
-
- // Save all double registers before messing with them.
- __ subi(sp, sp, Operand(kDoubleRegsSize));
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister dreg = DoubleRegister::from_code(code);
- int offset = code * kDoubleSize;
- __ stfd(dreg, MemOperand(sp, offset));
- }
-
- // Push saved_regs (needed to populate FrameDescription::registers_).
- // Leave gaps for other registers.
- __ subi(sp, sp, Operand(kNumberOfRegisters * kSystemPointerSize));
- for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
- if ((saved_regs & (1 << i)) != 0) {
- __ StoreP(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
- }
- }
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
- __ mov(scratch, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate)));
- __ StoreP(fp, MemOperand(scratch));
- }
- const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
-
- // Get the bailout id is passed as r29 by the caller.
- __ mr(r5, r29);
-
- // Get the address of the location in the code object (r6) (return
- // address for lazy deoptimization) and compute the fp-to-sp delta in
- // register r7.
- __ mflr(r6);
- __ addi(r7, sp, Operand(kSavedRegistersAreaSize));
- __ sub(r7, fp, r7);
-
- // Allocate a new deoptimizer object.
- // Pass six arguments in r3 to r8.
- __ PrepareCallCFunction(6, r8);
- __ li(r3, Operand::Zero());
- Label context_check;
- __ LoadP(r4, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(r4, &context_check);
- __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ li(r4, Operand(static_cast<int>(deopt_kind)));
- // r5: bailout id already loaded.
- // r6: code address or 0 already loaded.
- // r7: Fp-to-sp delta.
- __ mov(r8, Operand(ExternalReference::isolate_address(isolate)));
- // Call Deoptimizer::New().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
-
- // Preserve "deoptimizer" object in register r3 and get the input
- // frame descriptor pointer to r4 (deoptimizer->input_);
- __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
-
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- __ LoadP(r5, MemOperand(sp, i * kSystemPointerSize));
- __ StoreP(r5, MemOperand(r4, offset));
- }
-
- int double_regs_offset = FrameDescription::double_registers_offset();
- // Copy double registers to
- // double_registers_[DoubleRegister::kNumRegisters]
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset =
- code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
- __ lfd(d0, MemOperand(sp, src_offset));
- __ stfd(d0, MemOperand(r4, dst_offset));
- }
-
- // Mark the stack as not iterable for the CPU profiler which won't be able to
- // walk the stack without the return address.
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.Acquire();
- Register zero = r7;
- __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ li(zero, Operand(0));
- __ stb(zero, MemOperand(is_iterable));
- }
-
- // Remove the saved registers from the stack.
- __ addi(sp, sp, Operand(kSavedRegistersAreaSize));
-
- // Compute a pointer to the unwinding limit in register r5; that is
- // the first stack slot not part of the input frame.
- __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
- __ add(r5, r5, sp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ addi(r6, r4, Operand(FrameDescription::frame_content_offset()));
- Label pop_loop;
- Label pop_loop_header;
- __ b(&pop_loop_header);
- __ bind(&pop_loop);
- __ pop(r7);
- __ StoreP(r7, MemOperand(r6, 0));
- __ addi(r6, r6, Operand(kSystemPointerSize));
- __ bind(&pop_loop_header);
- __ cmp(r5, sp);
- __ bne(&pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(r3); // Preserve deoptimizer object across call.
- // r3: deoptimizer object; r4: scratch.
- __ PrepareCallCFunction(1, r4);
- // Call Deoptimizer::ComputeOutputFrames().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
- }
- __ pop(r3); // Restore deoptimizer object (class Deoptimizer).
-
- __ LoadP(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
- // Outer loop state: r7 = current "FrameDescription** output_",
- // r4 = one past the last FrameDescription**.
- __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
- __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
- __ ShiftLeftImm(r4, r4, Operand(kSystemPointerSizeLog2));
- __ add(r4, r7, r4);
- __ b(&outer_loop_header);
-
- __ bind(&outer_push_loop);
- // Inner loop state: r5 = current FrameDescription*, r6 = loop index.
- __ LoadP(r5, MemOperand(r7, 0)); // output_[ix]
- __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
- __ b(&inner_loop_header);
-
- __ bind(&inner_push_loop);
- __ addi(r6, r6, Operand(-sizeof(intptr_t)));
- __ add(r9, r5, r6);
- __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
- __ push(r9);
-
- __ bind(&inner_loop_header);
- __ cmpi(r6, Operand::Zero());
- __ bne(&inner_push_loop); // test for gt?
-
- __ addi(r7, r7, Operand(kSystemPointerSize));
- __ bind(&outer_loop_header);
- __ cmp(r7, r4);
- __ blt(&outer_push_loop);
-
- __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister dreg = DoubleRegister::from_code(code);
- int src_offset = code * kDoubleSize + double_regs_offset;
- __ lfd(dreg, MemOperand(r4, src_offset));
- }
-
- // Push pc, and continuation from the last output frame.
- __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset()));
- __ push(r9);
- __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset()));
- __ push(r9);
-
- // Restore the registers from the last output frame.
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
- DCHECK(!(scratch.bit() & restored_regs));
- __ mr(scratch, r5);
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
- __ LoadP(ToRegister(i), MemOperand(scratch, offset));
- }
- }
- }
-
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.Acquire();
- Register one = r7;
- __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ li(one, Operand(1));
- __ stb(one, MemOperand(is_iterable));
- }
-
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
- __ pop(scratch); // get continuation, leave pc on stack
- __ pop(r0);
- __ mtlr(r0);
- __ Jump(scratch);
- }
-
- __ stop();
-}
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 3 * kInstrSize;
+const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
float float_val = static_cast<float>(double_registers_[n].get_scalar());
@@ -264,6 +35,5 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-#undef __
} // namespace internal
} // namespace v8
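
The ppc port now declares fixed-size deopt exits (kSupportsFixedDeoptExitSizes = true, 3 * kInstrSize per exit) instead of generating a hand-written deoptimization-entries stub. As a rough standalone sketch of what fixed-size exits buy — the address of any exit becomes pure arithmetic rather than a per-exit table lookup — assuming a 4-byte kInstrSize and a hypothetical DeoptExitAddress helper (this is not V8's actual API):

#include <cstdint>

// Assumptions for the sketch: PPC instructions are 4 bytes and every deopt
// exit occupies exactly 3 instructions, matching the constants added above.
constexpr int kInstrSize = 4;
constexpr int kLazyDeoptExitSize = 3 * kInstrSize;

// With fixed-size exits, the i-th exit is located by multiplication instead
// of a recorded per-exit offset.
uintptr_t DeoptExitAddress(uintptr_t deopt_exit_start, int exit_index) {
  return deopt_exit_start +
         static_cast<uintptr_t>(exit_index) * kLazyDeoptExitSize;
}
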
diff --git a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
index 66d9d0db8e..358450c091 100644
--- a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
@@ -2,240 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-const int Deoptimizer::kLazyDeoptExitSize = 0;
-
-#define __ masm->
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // Save all the registers onto the stack
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- RegList restored_regs = kJSCallerSaved | kCalleeSaved;
-
- const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
-
- // Save all double registers before messing with them.
- __ lay(sp, MemOperand(sp, -kDoubleRegsSize));
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister dreg = DoubleRegister::from_code(code);
- int offset = code * kDoubleSize;
- __ StoreDouble(dreg, MemOperand(sp, offset));
- }
-
- // Push all GPRs onto the stack
- __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kSystemPointerSize));
- __ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
-
- __ mov(r1, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate)));
- __ StoreP(fp, MemOperand(r1));
-
- const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
-
- // The bailout id is passed using r10
- __ LoadRR(r4, r10);
-
- // Cleanse the Return address for 31-bit
- __ CleanseP(r14);
-
- // Get the address of the location in the code object (r5)(return
- // address for lazy deoptimization) and compute the fp-to-sp delta in
- // register r6.
- __ LoadRR(r5, r14);
-
- __ la(r6, MemOperand(sp, kSavedRegistersAreaSize));
- __ SubP(r6, fp, r6);
-
- // Allocate a new deoptimizer object.
- // Pass six arguments in r2 to r7.
- __ PrepareCallCFunction(6, r7);
- __ LoadImmP(r2, Operand::Zero());
- Label context_check;
- __ LoadP(r3, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(r3, &context_check);
- __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ LoadImmP(r3, Operand(static_cast<int>(deopt_kind)));
- // r4: bailout id already loaded.
- // r5: code address or 0 already loaded.
- // r6: Fp-to-sp delta.
- // Parm6: isolate is passed on the stack.
- __ mov(r7, Operand(ExternalReference::isolate_address(isolate)));
- __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
-
- // Call Deoptimizer::New().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
-
- // Preserve "deoptimizer" object in register r2 and get the input
- // frame descriptor pointer to r3 (deoptimizer->input_);
- __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
-
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- // DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
- // __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
- // MemOperand(sp), kNumberOfRegisters * kSystemPointerSize);
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- // TODO(john.yan): optimize the following code by using mvc instruction
- DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- __ LoadP(r4, MemOperand(sp, i * kSystemPointerSize));
- __ StoreP(r4, MemOperand(r3, offset));
- }
-
- int double_regs_offset = FrameDescription::double_registers_offset();
- // Copy double registers to
- // double_registers_[DoubleRegister::kNumRegisters]
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset =
- code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
- // TODO(joransiu): MVC opportunity
- __ LoadDouble(d0, MemOperand(sp, src_offset));
- __ StoreDouble(d0, MemOperand(r3, dst_offset));
- }
-
- // Mark the stack as not iterable for the CPU profiler which won't be able to
- // walk the stack without the return address.
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.Acquire();
- Register zero = r6;
- __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ lhi(zero, Operand(0));
- __ StoreByte(zero, MemOperand(is_iterable));
- }
-
- // Remove the saved registers from the stack.
- __ la(sp, MemOperand(sp, kSavedRegistersAreaSize));
-
- // Compute a pointer to the unwinding limit in register r4; that is
- // the first stack slot not part of the input frame.
- __ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
- __ AddP(r4, sp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
- Label pop_loop;
- Label pop_loop_header;
- __ b(&pop_loop_header, Label::kNear);
- __ bind(&pop_loop);
- __ pop(r6);
- __ StoreP(r6, MemOperand(r5, 0));
- __ la(r5, MemOperand(r5, kSystemPointerSize));
- __ bind(&pop_loop_header);
- __ CmpP(r4, sp);
- __ bne(&pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(r2); // Preserve deoptimizer object across call.
- // r2: deoptimizer object; r3: scratch.
- __ PrepareCallCFunction(1, r3);
- // Call Deoptimizer::ComputeOutputFrames().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
- }
- __ pop(r2); // Restore deoptimizer object (class Deoptimizer).
-
- __ LoadP(sp, MemOperand(r2, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
- // Outer loop state: r6 = current "FrameDescription** output_",
- // r3 = one past the last FrameDescription**.
- __ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
- __ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
- __ ShiftLeftP(r3, r3, Operand(kSystemPointerSizeLog2));
- __ AddP(r3, r6, r3);
- __ b(&outer_loop_header, Label::kNear);
-
- __ bind(&outer_push_loop);
- // Inner loop state: r4 = current FrameDescription*, r5 = loop index.
- __ LoadP(r4, MemOperand(r6, 0)); // output_[ix]
- __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
- __ b(&inner_loop_header, Label::kNear);
-
- __ bind(&inner_push_loop);
- __ SubP(r5, Operand(sizeof(intptr_t)));
- __ AddP(r8, r4, r5);
- __ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
- __ push(r8);
-
- __ bind(&inner_loop_header);
- __ CmpP(r5, Operand::Zero());
- __ bne(&inner_push_loop); // test for gt?
-
- __ AddP(r6, r6, Operand(kSystemPointerSize));
- __ bind(&outer_loop_header);
- __ CmpP(r6, r3);
- __ blt(&outer_push_loop);
-
- __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister dreg = DoubleRegister::from_code(code);
- int src_offset = code * kDoubleSize + double_regs_offset;
- __ ld(dreg, MemOperand(r3, src_offset));
- }
-
- // Push pc and continuation from the last output frame.
- __ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset()));
- __ push(r8);
- __ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset()));
- __ push(r8);
-
- // Restore the registers from the last output frame.
- __ LoadRR(r1, r4);
- for (int i = kNumberOfRegisters - 1; i > 0; i--) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
- __ LoadP(ToRegister(i), MemOperand(r1, offset));
- }
- }
-
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.Acquire();
- Register one = r6;
- __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ lhi(one, Operand(1));
- __ StoreByte(one, MemOperand(is_iterable));
- }
-
- __ pop(ip); // get continuation, leave pc on stack
- __ pop(r14);
- __ Jump(ip);
-
- __ stop();
-}
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 6 + 2;
+const int Deoptimizer::kLazyDeoptExitSize = 6 + 2;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
@@ -257,7 +31,5 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-#undef __
-
} // namespace internal
} // namespace v8
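
The s390 port follows the same pattern; its exit size is written as 6 + 2 bytes, which fits s390's 2-byte instruction-length granularity. Purely as an illustration (not part of the patch), a compile-time check that the chosen sizes stay on an instruction boundary could look like the sketch below; the 2-byte granularity is an assumption stated here, not something the patch itself asserts:

// Sketch only. s390 instructions are 2, 4 or 6 bytes long (2-byte
// granularity), so a fixed-size deopt exit must end on a 2-byte boundary.
constexpr int kS390InstrGranularity = 2;      // assumption for the sketch
constexpr int kNonLazyDeoptExitSize = 6 + 2;  // value introduced above
constexpr int kLazyDeoptExitSize = 6 + 2;     // value introduced above

static_assert(kNonLazyDeoptExitSize % kS390InstrGranularity == 0,
              "deopt exit must end on an instruction boundary");
static_assert(kLazyDeoptExitSize % kS390InstrGranularity == 0,
              "deopt exit must end on an instruction boundary");
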
diff --git a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
index ea13361341..6f621ed34e 100644
--- a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
@@ -4,217 +4,14 @@
#if V8_TARGET_ARCH_X64
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
-#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-const int Deoptimizer::kLazyDeoptExitSize = 0;
-
-#define __ masm->
-
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
- __ AllocateStackSpace(kDoubleRegsSize);
-
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- XMMRegister xmm_reg = XMMRegister::from_code(code);
- int offset = code * kDoubleSize;
- __ Movsd(Operand(rsp, offset), xmm_reg);
- }
-
- // We push all registers onto the stack, even though we do not need
- // to restore all later.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- Register r = Register::from_code(i);
- __ pushq(r);
- }
-
- const int kSavedRegistersAreaSize =
- kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
-
- __ Store(
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate),
- rbp);
-
- // We use this to keep the value of the fifth argument temporarily.
- // Unfortunately we can't store it directly in r8 (used for passing
- // this on linux), since it is another parameter passing register on windows.
- Register arg5 = r11;
-
- // The bailout id is passed using r13 on the stack.
- __ movq(arg_reg_3, r13);
-
- // Get the address of the location in the code object
- // and compute the fp-to-sp delta in register arg5.
- __ movq(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize));
- __ leaq(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize));
-
- __ subq(arg5, rbp);
- __ negq(arg5);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6);
- __ movq(rax, Immediate(0));
- Label context_check;
- __ movq(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(rdi, &context_check);
- __ movq(rax, Operand(rbp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ movq(arg_reg_1, rax);
- __ Set(arg_reg_2, static_cast<int>(deopt_kind));
- // Args 3 and 4 are already in the right registers.
-
- // On windows put the arguments on the stack (PrepareCallCFunction
- // has created space for this). On linux pass the arguments in r8 and r9.
-#ifdef V8_TARGET_OS_WIN
- __ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
- __ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
- __ movq(Operand(rsp, 5 * kSystemPointerSize), arg5);
-#else
- __ movq(r8, arg5);
- __ LoadAddress(r9, ExternalReference::isolate_address(isolate));
-#endif
-
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
- // Preserve deoptimizer object in register rax and get the input
- // frame descriptor pointer.
- __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
-
- // Fill in the input registers.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- __ PopQuad(Operand(rbx, offset));
- }
-
- // Fill in the double input registers.
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- __ popq(Operand(rbx, dst_offset));
- }
-
- // Mark the stack as not iterable for the CPU profiler which won't be able to
- // walk the stack without the return address.
- __ movb(__ ExternalReferenceAsOperand(
- ExternalReference::stack_is_iterable_address(isolate)),
- Immediate(0));
-
- // Remove the return address from the stack.
- __ addq(rsp, Immediate(kPCOnStackSize));
-
- // Compute a pointer to the unwinding limit in register rcx; that is
- // the first stack slot not part of the input frame.
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ addq(rcx, rsp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ leaq(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
- Label pop_loop_header;
- __ jmp(&pop_loop_header);
- Label pop_loop;
- __ bind(&pop_loop);
- __ Pop(Operand(rdx, 0));
- __ addq(rdx, Immediate(sizeof(intptr_t)));
- __ bind(&pop_loop_header);
- __ cmpq(rcx, rsp);
- __ j(not_equal, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ pushq(rax);
- __ PrepareCallCFunction(2);
- __ movq(arg_reg_1, rax);
- __ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate));
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 2);
- }
- __ popq(rax);
-
- __ movq(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
- // Outer loop state: rax = current FrameDescription**, rdx = one past the
- // last FrameDescription**.
- __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
- __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
- __ leaq(rdx, Operand(rax, rdx, times_system_pointer_size, 0));
- __ jmp(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
- __ movq(rbx, Operand(rax, 0));
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ subq(rcx, Immediate(sizeof(intptr_t)));
- __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
- __ bind(&inner_loop_header);
- __ testq(rcx, rcx);
- __ j(not_zero, &inner_push_loop);
- __ addq(rax, Immediate(kSystemPointerSize));
- __ bind(&outer_loop_header);
- __ cmpq(rax, rdx);
- __ j(below, &outer_push_loop);
-
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- XMMRegister xmm_reg = XMMRegister::from_code(code);
- int src_offset = code * kDoubleSize + double_regs_offset;
- __ Movsd(xmm_reg, Operand(rbx, src_offset));
- }
-
- // Push pc and continuation from the last output frame.
- __ PushQuad(Operand(rbx, FrameDescription::pc_offset()));
- __ PushQuad(Operand(rbx, FrameDescription::continuation_offset()));
-
- // Push the registers from the last output frame.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- __ PushQuad(Operand(rbx, offset));
- }
-
- // Restore the registers from the stack.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- Register r = Register::from_code(i);
- // Do not restore rsp, simply pop the value into the next register
- // and overwrite this afterwards.
- if (r == rsp) {
- DCHECK_GT(i, 0);
- r = Register::from_code(i - 1);
- }
- __ popq(r);
- }
-
- __ movb(__ ExternalReferenceAsOperand(
- ExternalReference::stack_is_iterable_address(isolate)),
- Immediate(1));
-
- // Return to the continuation point.
- __ ret(0);
-}
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 7;
+const int Deoptimizer::kLazyDeoptExitSize = 7;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
@@ -236,8 +33,6 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-#undef __
-
} // namespace internal
} // namespace v8
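
The removed x64 stub, like its ppc and s390 counterparts, brackets the frame rewrite with writes to the stack_is_iterable flag so a sampling profiler never walks a half-rebuilt stack. The same toggle pattern expressed as a C++ RAII guard, written purely for illustration (names and types are assumptions, not V8 API):

#include <cstdint>

// Clears a one-byte "stack is iterable" flag for the duration of a scope and
// restores it on exit, mirroring the movb(..., Immediate(0)) / Immediate(1)
// pair in the code removed above.
class StackNotIterableScope {
 public:
  explicit StackNotIterableScope(volatile uint8_t* flag) : flag_(flag) {
    *flag_ = 0;  // the profiler must not walk the stack from here on
  }
  ~StackNotIterableScope() { *flag_ = 1; }  // safe to walk the stack again

 private:
  volatile uint8_t* flag_;
};
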
diff --git a/deps/v8/src/diagnostics/arm/disasm-arm.cc b/deps/v8/src/diagnostics/arm/disasm-arm.cc
index 190640d527..83cdca3c03 100644
--- a/deps/v8/src/diagnostics/arm/disasm-arm.cc
+++ b/deps/v8/src/diagnostics/arm/disasm-arm.cc
@@ -111,6 +111,20 @@ class Decoder {
void DecodeSpecialCondition(Instruction* instr);
+ // F4.1.14 Floating-point data-processing.
+ void DecodeFloatingPointDataProcessing(Instruction* instr);
+ // F4.1.18 Unconditional instructions.
+ void DecodeUnconditional(Instruction* instr);
+ // F4.1.20 Advanced SIMD data-processing.
+ void DecodeAdvancedSIMDDataProcessing(Instruction* instr);
+ // F4.1.21 Advanced SIMD two registers, or three registers of different
+ // lengths.
+ void DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr);
+ // F4.1.23 Memory hints and barriers.
+ void DecodeMemoryHintsAndBarriers(Instruction* instr);
+ // F4.1.24 Advanced SIMD element or structure load/store.
+ void DecodeAdvancedSIMDElementOrStructureLoadStore(Instruction* instr);
+
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
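
The new Decode* declarations mirror the decode tables of the Arm Architecture Reference Manual (sections F4.1.14 through F4.1.24): each level peels off a few opcode bit fields and dispatches to a narrower instruction class. A standalone sketch of that field-driven dispatch style follows, using a hypothetical Bits helper instead of V8's Instruction class; the bit positions are the ones DecodeUnconditional uses later in this patch:

#include <cstdint>

// Extract bits hi..lo (inclusive) of a 32-bit instruction word.
inline uint32_t Bits(uint32_t instr, int hi, int lo) {
  return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
}

// Mirrors the op0/op1 split of F4.1.18 "Unconditional instructions" as used
// by Decoder::DecodeUnconditional below (sketch, not the real decoder).
const char* ClassifyUnconditional(uint32_t instr) {
  uint32_t op0 = Bits(instr, 26, 25);
  uint32_t op1 = Bits(instr, 20, 20);
  if (op0 == 0b01) return "Advanced SIMD data-processing";
  if ((op0 & 0b10) == 0b10 && op1 != 0) return "Memory hints and barriers";
  if (op0 == 0b10 && op1 == 0) return "Advanced SIMD element/structure load/store";
  return "Miscellaneous or unknown";
}
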
@@ -565,11 +579,18 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintSoftwareInterrupt(instr->SvcValue());
return 3;
} else if (format[1] == 'i') { // 'sign: signed extra loads and stores
- DCHECK(STRING_STARTS_WITH(format, "sign"));
- if (instr->HasSign()) {
- Print("s");
+ if (format[2] == 'g') {
+ DCHECK(STRING_STARTS_WITH(format, "sign"));
+ if (instr->HasSign()) {
+ Print("s");
+ }
+ return 4;
+ } else { // 'size, for Advanced SIMD instructions
+ DCHECK(STRING_STARTS_WITH(format, "size"));
+ int sz = 8 << instr->Bits(21, 20);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sz);
+ return 4;
}
- return 4;
} else if (format[1] == 'p') {
if (format[8] == '_') { // 'spec_reg_fields
DCHECK(STRING_STARTS_WITH(format, "spec_reg_fields"));
@@ -1859,594 +1880,307 @@ static const char* const barrier_option_names[] = {
};
void Decoder::DecodeSpecialCondition(Instruction* instr) {
- switch (instr->SpecialValue()) {
- case 4: {
- int Vd, Vm, Vn;
- if (instr->Bit(6) == 0) {
- Vd = instr->VFPDRegValue(kDoublePrecision);
- Vm = instr->VFPMRegValue(kDoublePrecision);
- Vn = instr->VFPNRegValue(kDoublePrecision);
- } else {
- Vd = instr->VFPDRegValue(kSimd128Precision);
- Vm = instr->VFPMRegValue(kSimd128Precision);
- Vn = instr->VFPNRegValue(kSimd128Precision);
- }
- int size = kBitsPerByte * (1 << instr->Bits(21, 20));
- switch (instr->Bits(11, 8)) {
- case 0x0: {
- if (instr->Bit(4) == 1) {
- // vqadd.s<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vqadd.s%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x1: {
- if (instr->Bits(21, 20) == 2 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- if (Vm == Vn) {
- // vmov Qd, Qm
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmov q%d, q%d", Vd, Vm);
- } else {
- // vorr Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vorr q%d, q%d, q%d", Vd, Vn, Vm);
- }
- } else if (instr->Bits(21, 20) == 1 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vbic Qd, Qn, Qm
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vbic q%d, q%d, q%d", Vd, Vn, Vm);
- } else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vand Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vand q%d, q%d, q%d", Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x2: {
- if (instr->Bit(4) == 1) {
- // vqsub.s<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vqsub.s%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x3: {
- const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
- // vcge/vcgt.s<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d",
- op, size, Vd, Vn, Vm);
- break;
- }
- case 0x4: {
- if (instr->Bit(4) == 0) {
- // vshl.s<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vshl.s%d q%d, q%d, q%d", size, Vd, Vm, Vn);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x6: {
- // vmin/vmax.s<size> Qd, Qm, Qn.
- const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d",
- op, size, Vd, Vn, Vm);
- break;
+ int op0 = instr->Bits(25, 24);
+ int op1 = instr->Bits(11, 9);
+ int op2 = instr->Bit(4);
+
+ if (instr->Bit(27) == 0) {
+ DecodeUnconditional(instr);
+ } else if ((instr->Bits(27, 26) == 0b11) && (op0 == 0b10) &&
+ ((op1 >> 1) == 0b10) && !op2) {
+ DecodeFloatingPointDataProcessing(instr);
+ } else {
+ Unknown(instr);
+ }
+}
+
+void Decoder::DecodeFloatingPointDataProcessing(Instruction* instr) {
+ // Floating-point data processing, F4.1.14.
+ int op0 = instr->Bits(23, 20);
+ int op1 = instr->Bits(19, 16);
+ int op2 = instr->Bits(9, 8);
+ int op3 = instr->Bit(6);
+ if (((op0 & 0b1000) == 0) && op2 && !op3) {
+ // Floating-point conditional select.
+ // VSEL* (floating-point)
+ bool dp_operation = (instr->SzValue() == 1);
+ switch (instr->Bits(21, 20)) {
+ case 0x0:
+ if (dp_operation) {
+ Format(instr, "vseleq.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vseleq.f32 'Sd, 'Sn, 'Sm");
}
- case 0x8: {
- const char* op = (instr->Bit(4) == 0) ? "vadd" : "vtst";
- // vadd/vtst.i<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.i%d q%d, q%d, q%d",
- op, size, Vd, Vn, Vm);
- break;
+ break;
+ case 0x1:
+ if (dp_operation) {
+ Format(instr, "vselvs.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vselvs.f32 'Sd, 'Sn, 'Sm");
}
- case 0x9: {
- if (instr->Bit(6) == 1 && instr->Bit(4) == 1) {
- // vmul.i<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmul.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
+ break;
+ case 0x2:
+ if (dp_operation) {
+ Format(instr, "vselge.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vselge.f32 'Sd, 'Sn, 'Sm");
}
- case 0xA: {
- // vpmin/vpmax.s<size> Dd, Dm, Dn.
- const char* op = instr->Bit(4) == 1 ? "vpmin" : "vpmax";
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d d%d, d%d, d%d",
- op, size, Vd, Vn, Vm);
- break;
+ break;
+ case 0x3:
+ if (dp_operation) {
+ Format(instr, "vselgt.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vselgt.f32 'Sd, 'Sn, 'Sm");
}
- case 0xB: {
- // vpadd.i<size> Dd, Dm, Dn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vpadd.i%d d%d, d%d, d%d",
- size, Vd, Vn, Vm);
- break;
+ break;
+ default:
+ UNREACHABLE(); // Case analysis is exhaustive.
+ break;
+ }
+ } else if (instr->Opc1Value() == 0x4 && op2) {
+ // Floating-point minNum/maxNum.
+ // VMAXNM, VMINNM (floating-point)
+ if (instr->SzValue() == 0x1) {
+ if (instr->Bit(6) == 0x1) {
+ Format(instr, "vminnm.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vmaxnm.f64 'Dd, 'Dn, 'Dm");
+ }
+ } else {
+ if (instr->Bit(6) == 0x1) {
+ Format(instr, "vminnm.f32 'Sd, 'Sn, 'Sm");
+ } else {
+ Format(instr, "vmaxnm.f32 'Sd, 'Sn, 'Sm");
+ }
+ }
+ } else if (instr->Opc1Value() == 0x7 && (op1 >> 3) && op2 && op3) {
+ // Floating-point directed convert to integer.
+ // VRINTA, VRINTN, VRINTP, VRINTM (floating-point)
+ bool dp_operation = (instr->SzValue() == 1);
+ int rounding_mode = instr->Bits(17, 16);
+ switch (rounding_mode) {
+ case 0x0:
+ if (dp_operation) {
+ Format(instr, "vrinta.f64.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vrinta.f32.f32 'Sd, 'Sm");
}
- case 0xD: {
- if (instr->Bit(4) == 0) {
- const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
- // vadd/vsub.f32 Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
+ break;
+ case 0x1:
+ if (dp_operation) {
+ Format(instr, "vrintn.f64.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vrintn.f32.f32 'Sd, 'Sm");
}
- case 0xE: {
- if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
- // vceq.f32 Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vceq.f32 q%d, q%d, q%d", Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
+ break;
+ case 0x2:
+ if (dp_operation) {
+ Format(instr, "vrintp.f64.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vrintp.f32.f32 'Sd, 'Sm");
}
- case 0xF: {
- if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
- if (instr->Bit(4) == 1) {
- // vrecps/vrsqrts.f32 Qd, Qm, Qn.
- const char* op = instr->Bit(21) == 0 ? "vrecps" : "vrsqrts";
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
- } else {
- // vmin/max.f32 Qd, Qm, Qn.
- const char* op = instr->Bit(21) == 1 ? "vmin" : "vmax";
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
- }
- } else {
- Unknown(instr);
- }
- break;
+ break;
+ case 0x3:
+ if (dp_operation) {
+ Format(instr, "vrintm.f64.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vrintm.f32.f32 'Sd, 'Sm");
}
- default:
- Unknown(instr);
- break;
- }
- break;
+ break;
+ default:
+ UNREACHABLE(); // Case analysis is exhaustive.
+ break;
}
- case 5:
- if (instr->Bit(23) == 1 && instr->Bits(21, 19) == 0 &&
- instr->Bit(7) == 0 && instr->Bit(4) == 1) {
- // One register and a modified immediate value, see ARM DDI 0406C.d
- // A7.4.6.
- DecodeVmovImmediate(instr);
- } else if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
- (instr->Bit(4) == 1)) {
- // vmovl signed
- if ((instr->VdValue() & 1) != 0) Unknown(instr);
- int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
- int Vm = (instr->Bit(5) << 4) | instr->VmValue();
- int imm3 = instr->Bits(21, 19);
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmovl.s%d q%d, d%d", imm3 * 8, Vd, Vm);
- } else if (instr->Bits(21, 20) == 3 && instr->Bit(4) == 0) {
- // vext.8 Qd, Qm, Qn, imm4
- int imm4 = instr->Bits(11, 8);
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vext.8 q%d, q%d, q%d, #%d",
- Vd, Vn, Vm, imm4);
- } else if (instr->Bits(11, 8) == 5 && instr->Bit(4) == 1) {
- // vshl.i<size> Qd, Qm, shift
- int imm7 = instr->Bits(21, 16);
- if (instr->Bit(7) != 0) imm7 += 64;
- int size = base::bits::RoundDownToPowerOfTwo32(imm7);
- int shift = imm7 - size;
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vshl.i%d q%d, q%d, #%d",
- size, Vd, Vm, shift);
- } else if (instr->Bits(11, 8) == 0 && instr->Bit(4) == 1) {
+ } else {
+ Unknown(instr);
+ }
+ // One class of decoding is missing here: Floating-point extraction and
+ // insertion, but it is not used in V8 now, and thus omitted.
+}
+
+void Decoder::DecodeUnconditional(Instruction* instr) {
+ // This follows the decoding in F4.1.18 Unconditional instructions.
+ int op0 = instr->Bits(26, 25);
+ int op1 = instr->Bit(20);
+
+ // Four classes of decoding:
+ // - Miscellaneous (omitted, no instructions used in V8).
+ // - Advanced SIMD data-processing.
+ // - Memory hints and barriers.
+ // - Advanced SIMD element or structure load/store.
+ if (op0 == 0b01) {
+ DecodeAdvancedSIMDDataProcessing(instr);
+ } else if ((op0 & 0b10) == 0b10 && op1) {
+ DecodeMemoryHintsAndBarriers(instr);
+ } else if (op0 == 0b10 && !op1) {
+ DecodeAdvancedSIMDElementOrStructureLoadStore(instr);
+ } else {
+ Unknown(instr);
+ }
+}
+
+void Decoder::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
+ int op0 = instr->Bit(23);
+ int op1 = instr->Bit(4);
+ if (op0 == 0) {
+ // Advanced SIMD three registers of same length.
+ int Vd, Vm, Vn;
+ if (instr->Bit(6) == 0) {
+ Vd = instr->VFPDRegValue(kDoublePrecision);
+ Vm = instr->VFPMRegValue(kDoublePrecision);
+ Vn = instr->VFPNRegValue(kDoublePrecision);
+ } else {
+ Vd = instr->VFPDRegValue(kSimd128Precision);
+ Vm = instr->VFPMRegValue(kSimd128Precision);
+ Vn = instr->VFPNRegValue(kSimd128Precision);
+ }
+
+ int u = instr->Bit(24);
+ int opc = instr->Bits(11, 8);
+ int q = instr->Bit(6);
+ int sz = instr->Bits(21, 20);
+
+ if (!u && opc == 0 && op1) {
+ Format(instr, "vqadd.s'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 1 && sz == 2 && q && op1) {
+ if (Vm == Vn) {
+ Format(instr, "vmov 'Qd, 'Qm");
+ } else {
+ Format(instr, "vorr 'Qd, 'Qn, 'Qm");
+ }
+ } else if (!u && opc == 1 && sz == 1 && q && op1) {
+ Format(instr, "vbic 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 1 && sz == 0 && q && op1) {
+ Format(instr, "vand 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 2 && op1) {
+ Format(instr, "vqsub.s'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 3 && op1) {
+ Format(instr, "vcge.s'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 3 && !op1) {
+ Format(instr, "vcgt.s'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 4 && !op1) {
+ Format(instr, "vshl.s'size 'Qd, 'Qm, 'Qn");
+ } else if (!u && opc == 6 && op1) {
+ Format(instr, "vmin.s'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 6 && !op1) {
+ Format(instr, "vmax.s'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 8 && op1) {
+ Format(instr, "vtst.i'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 8 && !op1) {
+ Format(instr, "vadd.i'size 'Qd, 'Qn, 'Qm");
+ } else if (opc == 9 && op1) {
+ Format(instr, "vmul.i'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 0xA && op1) {
+ Format(instr, "vpmin.s'size 'Dd, 'Dn, 'Dm");
+ } else if (!u && opc == 0xA && !op1) {
+ Format(instr, "vpmax.s'size 'Dd, 'Dn, 'Dm");
+ } else if (!u && opc == 0xB) {
+ Format(instr, "vpadd.i'size 'Dd, 'Dn, 'Dm");
+ } else if (!u && !(sz >> 1) && opc == 0xD && !op1) {
+ Format(instr, "vadd.f32 'Qd, 'Qn, 'Qm");
+ } else if (!u && (sz >> 1) && opc == 0xD && !op1) {
+ Format(instr, "vsub.f32 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 0xE && !sz && !op1) {
+ Format(instr, "vceq.f32 'Qd, 'Qn, 'Qm");
+ } else if (!u && !(sz >> 1) && opc == 0xF && op1) {
+ Format(instr, "vrecps.f32 'Qd, 'Qn, 'Qm");
+ } else if (!u && (sz >> 1) && opc == 0xF && op1) {
+ Format(instr, "vrsqrts.f32 'Qd, 'Qn, 'Qm");
+ } else if (!u && !(sz >> 1) && opc == 0xF && !op1) {
+ Format(instr, "vmax.f32 'Qd, 'Qn, 'Qm");
+ } else if (!u && (sz >> 1) && opc == 0xF && !op1) {
+ Format(instr, "vmin.f32 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 0 && op1) {
+ Format(instr, "vqadd.u'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 1 && sz == 1 && op1) {
+ Format(instr, "vbsl 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 1 && sz == 0 && q && op1) {
+ Format(instr, "veor 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 1 && sz == 0 && !q && op1) {
+ Format(instr, "veor 'Dd, 'Dn, 'Dm");
+ } else if (u && opc == 1 && !op1) {
+ Format(instr, "vrhadd.u'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 2 && op1) {
+ Format(instr, "vqsub.u'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 3 && op1) {
+ Format(instr, "vcge.u'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 3 && !op1) {
+ Format(instr, "vcgt.u'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 4 && !op1) {
+ Format(instr, "vshl.u'size 'Qd, 'Qm, 'Qn");
+ } else if (u && opc == 6 && op1) {
+ Format(instr, "vmin.u'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 6 && !op1) {
+ Format(instr, "vmax.u'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 8 && op1) {
+ Format(instr, "vceq.i'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 8 && !op1) {
+ Format(instr, "vsub.i'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 0xA && op1) {
+ Format(instr, "vpmin.u'size 'Dd, 'Dn, 'Dm");
+ } else if (u && opc == 0xA && !op1) {
+ Format(instr, "vpmax.u'size 'Dd, 'Dn, 'Dm");
+ } else if (u && opc == 0xD && sz == 0 && q && op1) {
+ Format(instr, "vmul.f32 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 0xD && sz == 0 && !q && !op1) {
+ Format(instr, "vpadd.f32 'Dd, 'Dn, 'Dm");
+ } else if (u && opc == 0xE && !(sz >> 1) && !op1) {
+ Format(instr, "vcge.f32 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 0xE && (sz >> 1) && !op1) {
+ Format(instr, "vcgt.f32 'Qd, 'Qn, 'Qm");
+ } else {
+ Unknown(instr);
+ }
+ } else if (op0 == 1 && op1 == 0) {
+ DecodeAdvancedSIMDTwoOrThreeRegisters(instr);
+ } else if (op0 == 1 && op1 == 1) {
+ // Advanced SIMD shifts and immediate generation.
+ if (instr->Bits(21, 19) == 0 && instr->Bit(7) == 0) {
+ // Advanced SIMD one register and modified immediate.
+ DecodeVmovImmediate(instr);
+ } else {
+ // Advanced SIMD two registers and shift amount.
+ int u = instr->Bit(24);
+ int imm3H = instr->Bits(21, 19);
+ int imm3L = instr->Bits(18, 16);
+ int opc = instr->Bits(11, 8);
+ int l = instr->Bit(7);
+ int q = instr->Bit(6);
+ int imm3H_L = imm3H << 1 | l;
+
+ if (imm3H_L != 0 && opc == 0) {
// vshr.s<size> Qd, Qm, shift
- int imm7 = instr->Bits(21, 16);
- if (instr->Bit(7) != 0) imm7 += 64;
+ int imm7 = (l << 6) | instr->Bits(21, 16);
int size = base::bits::RoundDownToPowerOfTwo32(imm7);
int shift = 2 * size - imm7;
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vshr.s%d q%d, q%d, #%d",
- size, Vd, Vm, shift);
- } else if (instr->Bits(11, 8) == 0xC && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
- // vmull.s<size> Qd, Dn, Dm
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vshr.%s%d q%d, q%d, #%d",
+ u ? "u" : "s", size, Vd, Vm, shift);
+ } else if (imm3H_L != 0 && imm3L == 0 && opc == 0b1010 && !q) {
+ // vmovl
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
- int size = 8 << instr->Bits(21, 20);
+ int imm3H = instr->Bits(21, 19);
out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vmull.s%d q%d, d%d, d%d",
- size, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- case 6: {
- int Vd, Vm, Vn;
- if (instr->Bit(6) == 0) {
- Vd = instr->VFPDRegValue(kDoublePrecision);
- Vm = instr->VFPMRegValue(kDoublePrecision);
- Vn = instr->VFPNRegValue(kDoublePrecision);
- } else {
- Vd = instr->VFPDRegValue(kSimd128Precision);
- Vm = instr->VFPMRegValue(kSimd128Precision);
- Vn = instr->VFPNRegValue(kSimd128Precision);
- }
- int size = kBitsPerByte * (1 << instr->Bits(21, 20));
- switch (instr->Bits(11, 8)) {
- case 0x0: {
- if (instr->Bit(4) == 1) {
- // vqadd.u<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vqadd.u%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x1: {
- if (instr->Bits(21, 20) == 1 && instr->Bit(4) == 1) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vbsl q%d, q%d, q%d", Vd, Vn, Vm);
- } else if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 1) {
- if (instr->Bit(6) == 0) {
- // veor Dd, Dn, Dm
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "veor d%d, d%d, d%d", Vd, Vn, Vm);
-
- } else {
- // veor Qd, Qn, Qm
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "veor q%d, q%d, q%d", Vd, Vn, Vm);
- }
- } else if (instr->Bit(4) == 0) {
- if (instr->Bit(6) == 1) {
- // vrhadd.u<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vrhadd.u%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else {
- // vrhadd.u<size> Dd, Dm, Dn.
- Unknown(instr);
- }
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x2: {
- if (instr->Bit(4) == 1) {
- // vqsub.u<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vqsub.u%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x3: {
- const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
- // vcge/vcgt.u<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d",
- op, size, Vd, Vn, Vm);
- break;
- }
- case 0x4: {
- if (instr->Bit(4) == 0) {
- // vshl.u<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vshl.u%d q%d, q%d, q%d", size, Vd, Vm, Vn);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x6: {
- // vmin/vmax.u<size> Qd, Qm, Qn.
- const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d",
- op, size, Vd, Vn, Vm);
- break;
- }
- case 0x8: {
- if (instr->Bit(4) == 0) {
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vsub.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else {
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vceq.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- }
- break;
- }
- case 0xA: {
- // vpmin/vpmax.u<size> Dd, Dm, Dn.
- const char* op = instr->Bit(4) == 1 ? "vpmin" : "vpmax";
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d d%d, d%d, d%d",
- op, size, Vd, Vn, Vm);
- break;
- }
- case 0xD: {
- if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vmul.f32 Qd, Qm, Qn
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmul.f32 q%d, q%d, q%d", Vd, Vn, Vm);
- } else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
- // vpadd.f32 Dd, Dm, Dn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vpadd.f32 d%d, d%d, d%d", Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0xE: {
- if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
- const char* op = (instr->Bit(21) == 0) ? "vcge" : "vcgt";
- // vcge/vcgt.f32 Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- }
- default:
- Unknown(instr);
- break;
- }
- break;
- }
- case 7:
- if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
- (instr->Bit(4) == 1)) {
- // vmovl unsigned
- if ((instr->VdValue() & 1) != 0) Unknown(instr);
- int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
- int Vm = (instr->Bit(5) << 4) | instr->VmValue();
- int imm3 = instr->Bits(21, 19);
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmovl.u%d q%d, d%d", imm3 * 8, Vd, Vm);
- } else if (instr->Opc1Value() == 7 && instr->Bit(4) == 0) {
- if (instr->Bits(11, 7) == 0x18) {
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- int imm4 = instr->Bits(19, 16);
- int size = 0, index = 0;
- if ((imm4 & 0x1) != 0) {
- size = 8;
- index = imm4 >> 1;
- } else if ((imm4 & 0x2) != 0) {
- size = 16;
- index = imm4 >> 2;
- } else {
- size = 32;
- index = imm4 >> 3;
- }
- if (instr->Bit(6) == 0) {
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vdup.%i d%d, d%d[%d]",
- size, Vd, Vm, index);
- } else {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vdup.%i q%d, d%d[%d]",
- size, Vd, Vm, index);
- }
- } else if (instr->Bits(11, 10) == 0x2) {
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- int Vn = instr->VFPNRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- int len = instr->Bits(9, 8);
- NeonListOperand list(DwVfpRegister::from_code(Vn), len + 1);
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s d%d, ",
- instr->Bit(6) == 0 ? "vtbl.8" : "vtbx.8", Vd);
- FormatNeonList(Vn, list.type());
- Print(", ");
- PrintDRegister(Vm);
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 8) == 0x2 &&
- instr->Bits(7, 6) != 0) {
- // vqmov{u}n.<type><size> Dd, Qm.
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int op = instr->Bits(7, 6);
- const char* name = op == 0b01 ? "vqmovun" : "vqmovn";
- char type = op == 0b11 ? 'u' : 's';
- int size = 2 * kBitsPerByte * (1 << instr->Bits(19, 18));
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.%c%i d%d, q%d", name,
- type, size, Vd, Vm);
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bit(10) == 1) {
- // NEON vrintm, vrintn, vrintp, vrintz.
- bool dp_op = instr->Bit(6) == 0;
- int rounding_mode = instr->Bits(9, 7);
- switch (rounding_mode) {
- case 0:
- if (dp_op) {
- Format(instr, "vrintn.f32 'Dd, 'Dm");
- } else {
- Format(instr, "vrintn.f32 'Qd, 'Qm");
- }
- break;
- case 3:
- if (dp_op) {
- Format(instr, "vrintz.f32 'Dd, 'Dm");
- } else {
- Format(instr, "vrintz.f32 'Qd, 'Qm");
- }
- break;
- case 5:
- if (dp_op) {
- Format(instr, "vrintm.f32 'Dd, 'Dm");
- } else {
- Format(instr, "vrintm.f32 'Qd, 'Qm");
- }
- break;
- case 7:
- if (dp_op) {
- Format(instr, "vrintp.f32 'Dd, 'Dm");
- } else {
- Format(instr, "vrintp.f32 'Qd, 'Qm");
- }
- break;
- default:
- UNIMPLEMENTED();
- }
- } else {
- int Vd, Vm;
- if (instr->Bit(6) == 0) {
- Vd = instr->VFPDRegValue(kDoublePrecision);
- Vm = instr->VFPMRegValue(kDoublePrecision);
- } else {
- Vd = instr->VFPDRegValue(kSimd128Precision);
- Vm = instr->VFPMRegValue(kSimd128Precision);
- }
- if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
- if (instr->Bit(6) == 0) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vswp d%d, d%d", Vd, Vm);
- } else {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vswp q%d, q%d", Vd, Vm);
- }
- } else if (instr->Bits(19, 16) == 0 && instr->Bits(11, 6) == 0x17) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmvn q%d, q%d", Vd, Vm);
- } else if (instr->Bits(19, 16) == 0xB && instr->Bits(11, 9) == 0x3 &&
- instr->Bit(6) == 1) {
- const char* suffix = nullptr;
- int op = instr->Bits(8, 7);
- switch (op) {
- case 0:
- suffix = "f32.s32";
- break;
- case 1:
- suffix = "f32.u32";
- break;
- case 2:
- suffix = "s32.f32";
- break;
- case 3:
- suffix = "u32.f32";
- break;
- }
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vcvt.%s q%d, q%d", suffix, Vd, Vm);
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 8) == 0x1) {
- int size = kBitsPerByte * (1 << instr->Bits(19, 18));
- const char* op = instr->Bit(7) != 0 ? "vzip" : "vuzp";
- if (instr->Bit(6) == 0) {
- // vzip/vuzp.<size> Dd, Dm.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.%d d%d, d%d", op, size, Vd, Vm);
- } else {
- // vzip/vuzp.<size> Qd, Qm.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.%d q%d, q%d", op, size, Vd, Vm);
- }
- } else if (instr->Bits(17, 16) == 0 && instr->Bits(11, 9) == 0 &&
- instr->Bit(6) == 1) {
- int size = kBitsPerByte * (1 << instr->Bits(19, 18));
- int op = kBitsPerByte
- << (static_cast<int>(Neon64) - instr->Bits(8, 7));
- // vrev<op>.<size> Qd, Qm.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vrev%d.%d q%d, q%d", op, size, Vd, Vm);
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0x1) {
- int size = kBitsPerByte * (1 << instr->Bits(19, 18));
- if (instr->Bit(6) == 0) {
- // vtrn.<size> Dd, Dm.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vtrn.%d d%d, d%d", size, Vd, Vm);
- } else {
- // vtrn.<size> Qd, Qm.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vtrn.%d q%d, q%d", size, Vd, Vm);
- }
- } else if (instr->Bits(17, 16) == 0x1 && instr->Bit(11) == 0 &&
- instr->Bit(6) == 1) {
- int size = kBitsPerByte * (1 << instr->Bits(19, 18));
- char type = instr->Bit(10) != 0 ? 'f' : 's';
- if (instr->Bits(9, 6) == 0xD) {
- // vabs<type>.<size> Qd, Qm.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vabs.%c%d q%d, q%d",
- type, size, Vd, Vm);
- } else if (instr->Bits(9, 6) == 0xF) {
- // vneg<type>.<size> Qd, Qm.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vneg.%c%d q%d, q%d",
- type, size, Vd, Vm);
- } else {
- Unknown(instr);
- }
- } else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5 &&
- instr->Bit(6) == 1) {
- // vrecpe/vrsqrte.f32 Qd, Qm.
- const char* op = instr->Bit(7) == 0 ? "vrecpe" : "vrsqrte";
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.f32 q%d, q%d", op, Vd, Vm);
- } else {
- Unknown(instr);
- }
- }
- } else if (instr->Bits(11, 8) == 0 && instr->Bit(4) == 1 &&
- instr->Bit(6) == 1) {
- // vshr.u<size> Qd, Qm, shift
- int imm7 = instr->Bits(21, 16);
- if (instr->Bit(7) != 0) imm7 += 64;
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vmovl.%s%d q%d, d%d",
+ u ? "u" : "s", imm3H * 8, Vd, Vm);
+ } else if (!u && imm3H_L != 0 && opc == 0b0101) {
+ // vshl.i<size> Qd, Qm, shift
+ int imm7 = (l << 6) | instr->Bits(21, 16);
int size = base::bits::RoundDownToPowerOfTwo32(imm7);
- int shift = 2 * size - imm7;
+ int shift = imm7 - size;
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vshr.u%d q%d, q%d, #%d",
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vshl.i%d q%d, q%d, #%d",
size, Vd, Vm, shift);
- } else if (instr->Bit(10) == 1 && instr->Bit(6) == 0 &&
- instr->Bit(4) == 1) {
+ } else if (u && imm3H_L != 0 && (opc & 0b1110) == 0b0100) {
// vsli.<size> Dd, Dm, shift
// vsri.<size> Dd, Dm, shift
- int imm7 = instr->Bits(21, 16);
- if (instr->Bit(7) != 0) imm7 += 64;
+ int imm7 = (l << 6) | instr->Bits(21, 16);
int size = base::bits::RoundDownToPowerOfTwo32(imm7);
int shift;
char direction;
@@ -2462,208 +2196,299 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vs%ci.%d d%d, d%d, #%d",
direction, size, Vd, Vm, shift);
- } else if (instr->Bits(11, 8) == 0x8 && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
- // vmlal.u<size> <Qd>, <Dn>, <Dm>
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- int size = 8 << instr->Bits(21, 20);
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vmlal.u%d q%d, d%d, d%d",
- size, Vd, Vn, Vm);
- } else if (instr->Bits(11, 8) == 0xC && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
- // vmull.u<size> <Qd>, <Dn>, <Dm>
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- int size = 8 << instr->Bits(21, 20);
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vmull.u%d q%d, d%d, d%d",
- size, Vd, Vn, Vm);
- } else if (instr->Bits(21, 19) == 0 && instr->Bit(7) == 0 &&
- instr->Bit(4) == 1) {
- // One register and a modified immediate value, see ARM DDI 0406C.d
- // A7.4.6.
- DecodeVmovImmediate(instr);
- } else {
- Unknown(instr);
}
- break;
- case 8:
- if (instr->Bits(21, 20) == 0) {
- // vst1
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
- int Rn = instr->VnValue();
- int type = instr->Bits(11, 8);
- int size = instr->Bits(7, 6);
- int align = instr->Bits(5, 4);
- int Rm = instr->VmValue();
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "vst1.%d ",
- (1 << size) << 3);
- FormatNeonList(Vd, type);
- Print(", ");
- FormatNeonMemory(Rn, align, Rm);
- } else if (instr->Bits(21, 20) == 2) {
- // vld1
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
- int Rn = instr->VnValue();
- int type = instr->Bits(11, 8);
- int size = instr->Bits(7, 6);
- int align = instr->Bits(5, 4);
- int Rm = instr->VmValue();
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "vld1.%d ",
- (1 << size) << 3);
- FormatNeonList(Vd, type);
- Print(", ");
- FormatNeonMemory(Rn, align, Rm);
+ }
+ } else {
+ Unknown(instr);
+ }
+}
+
+void Decoder::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
+ // Advanced SIMD two registers, or three registers of different lengths.
+ int op0 = instr->Bit(24);
+ int op1 = instr->Bits(21, 20);
+ int op2 = instr->Bits(11, 10);
+ int op3 = instr->Bit(6);
+ if (!op0 && op1 == 0b11) {
+ // vext.8 Qd, Qm, Qn, imm4
+ int imm4 = instr->Bits(11, 8);
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vext.8 q%d, q%d, q%d, #%d", Vd, Vn, Vm, imm4);
+ } else if (op0 && op1 == 0b11 && ((op2 >> 1) == 0)) {
+ // Advanced SIMD two registers misc
+ int size = instr->Bits(19, 18);
+ int opc1 = instr->Bits(17, 16);
+ int opc2 = instr->Bits(10, 7);
+ int q = instr->Bit(6);
+
+ int Vd, Vm;
+ if (q) {
+ Vd = instr->VFPDRegValue(kSimd128Precision);
+ Vm = instr->VFPMRegValue(kSimd128Precision);
+ } else {
+ Vd = instr->VFPDRegValue(kDoublePrecision);
+ Vm = instr->VFPMRegValue(kDoublePrecision);
+ }
+
+ int esize = kBitsPerByte * (1 << size);
+ if (opc1 == 0 && (opc2 >> 2) == 0) {
+ int op = kBitsPerByte << (static_cast<int>(Neon64) - instr->Bits(8, 7));
+ // vrev<op>.<esize> Qd, Qm.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vrev%d.%d q%d, q%d", op, esize, Vd, Vm);
+ } else if (size == 0 && opc1 == 0b10 && opc2 == 0) {
+ Format(instr, q ? "vswp 'Qd, 'Qm" : "vswp 'Dd, 'Dm");
+ } else if (opc1 == 0 && opc2 == 0b1011) {
+ Format(instr, "vmvn 'Qd, 'Qm");
+ } else if (opc1 == 0b01 && (opc2 & 0b0111) == 0b110) {
+ // vabs<type>.<esize> Qd, Qm.
+ char type = instr->Bit(10) != 0 ? 'f' : 's';
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vabs.%c%d q%d, q%d", type, esize, Vd, Vm);
+ } else if (opc1 == 0b01 && (opc2 & 0b0111) == 0b111) {
+ // vneg<type>.<esize> Qd, Qm.
+ char type = instr->Bit(10) != 0 ? 'f' : 's';
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vneg.%c%d q%d, q%d", type, esize, Vd, Vm);
+ } else if (opc1 == 0b10 && opc2 == 0b0001) {
+ if (q) {
+ // vtrn.<esize> Qd, Qm.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vtrn.%d q%d, q%d", esize, Vd, Vm);
} else {
- Unknown(instr);
+ // vtrn.<esize> Dd, Dm.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vtrn.%d d%d, d%d", esize, Vd, Vm);
}
- break;
- case 0xA:
- case 0xB:
- if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xF)) {
- const char* rn_name = converter_.NameOfCPURegister(instr->Bits(19, 16));
- int offset = instr->Bits(11, 0);
- if (offset == 0) {
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "pld [%s]", rn_name);
- } else if (instr->Bit(23) == 0) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "pld [%s, #-%d]", rn_name, offset);
- } else {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "pld [%s, #+%d]", rn_name, offset);
- }
- } else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
- int option = instr->Bits(3, 0);
- switch (instr->Bits(7, 4)) {
- case 4:
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "dsb %s",
- barrier_option_names[option]);
- break;
- case 5:
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "dmb %s",
- barrier_option_names[option]);
- break;
- case 6:
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "isb %s",
- barrier_option_names[option]);
- break;
- default:
- Unknown(instr);
- }
+ } else if (opc1 == 0b10 && (opc2 & 0b1110) == 0b0010) {
+ const char* op = instr->Bit(7) != 0 ? "vzip" : "vuzp";
+ if (q) {
+ // vzip/vuzp.<esize> Qd, Qm.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s.%d q%d, q%d", op, esize, Vd, Vm);
} else {
- Unknown(instr);
+ // vzip/vuzp.<esize> Dd, Dm.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s.%d d%d, d%d", op, esize, Vd, Vm);
+ }
+ } else if (opc1 == 0b10 && (opc2 & 0b1110) == 0b0100) {
+ // vqmov{u}n.<type><esize> Dd, Qm.
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int op = instr->Bits(7, 6);
+ const char* name = op == 0b01 ? "vqmovun" : "vqmovn";
+ char type = op == 0b11 ? 'u' : 's';
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s.%c%i d%d, q%d", name,
+ type, esize << 1, Vd, Vm);
+ } else if (opc1 == 0b10 && opc2 == 0b1000) {
+ Format(instr, q ? "vrintn.f32 'Qd, 'Qm" : "vrintn.f32 'Dd, 'Dm");
+ } else if (opc1 == 0b10 && opc2 == 0b1011) {
+ Format(instr, q ? "vrintz.f32 'Qd, 'Qm" : "vrintz.f32 'Dd, 'Dm");
+ } else if (opc1 == 0b10 && opc2 == 0b1101) {
+ Format(instr, q ? "vrintm.f32 'Qd, 'Qm" : "vrintm.f32 'Qd, 'Qm");
+ } else if (opc1 == 0b10 && opc2 == 0b1111) {
+ Format(instr, q ? "vrintp.f32 'Qd, 'Qm" : "vrintp.f32 'Qd, 'Qm");
+ } else if (opc1 == 0b11 && (opc2 & 0b1101) == 0b1000) {
+ Format(instr, "vrecpe.f32 'Qd, 'Qm");
+ } else if (opc1 == 0b11 && (opc2 & 0b1101) == 0b1001) {
+ Format(instr, "vrsqrte.f32 'Qd, 'Qm");
+ } else if (opc1 == 0b11 && (opc2 & 0b1100) == 0b1100) {
+ const char* suffix = nullptr;
+ int op = instr->Bits(8, 7);
+ switch (op) {
+ case 0:
+ suffix = "f32.s32";
+ break;
+ case 1:
+ suffix = "f32.u32";
+ break;
+ case 2:
+ suffix = "s32.f32";
+ break;
+ case 3:
+ suffix = "u32.f32";
+ break;
}
- break;
- case 0x1D:
- if (instr->Opc1Value() == 0x7 && instr->Bits(19, 18) == 0x2 &&
- instr->Bits(11, 9) == 0x5 && instr->Bits(7, 6) == 0x1 &&
- instr->Bit(4) == 0x0) {
- // VRINTA, VRINTN, VRINTP, VRINTM (floating-point)
- bool dp_operation = (instr->SzValue() == 1);
- int rounding_mode = instr->Bits(17, 16);
- switch (rounding_mode) {
- case 0x0:
- if (dp_operation) {
- Format(instr, "vrinta.f64.f64 'Dd, 'Dm");
- } else {
- Format(instr, "vrinta.f32.f32 'Sd, 'Sm");
- }
- break;
- case 0x1:
- if (dp_operation) {
- Format(instr, "vrintn.f64.f64 'Dd, 'Dm");
- } else {
- Format(instr, "vrintn.f32.f32 'Sd, 'Sm");
- }
- break;
- case 0x2:
- if (dp_operation) {
- Format(instr, "vrintp.f64.f64 'Dd, 'Dm");
- } else {
- Format(instr, "vrintp.f32.f32 'Sd, 'Sm");
- }
- break;
- case 0x3:
- if (dp_operation) {
- Format(instr, "vrintm.f64.f64 'Dd, 'Dm");
- } else {
- Format(instr, "vrintm.f32.f32 'Sd, 'Sm");
- }
- break;
- default:
- UNREACHABLE(); // Case analysis is exhaustive.
- break;
- }
- } else if ((instr->Opc1Value() == 0x4) && (instr->Bits(11, 9) == 0x5) &&
- (instr->Bit(4) == 0x0)) {
- // VMAXNM, VMINNM (floating-point)
- if (instr->SzValue() == 0x1) {
- if (instr->Bit(6) == 0x1) {
- Format(instr, "vminnm.f64 'Dd, 'Dn, 'Dm");
- } else {
- Format(instr, "vmaxnm.f64 'Dd, 'Dn, 'Dm");
- }
- } else {
- if (instr->Bit(6) == 0x1) {
- Format(instr, "vminnm.f32 'Sd, 'Sn, 'Sm");
- } else {
- Format(instr, "vmaxnm.f32 'Sd, 'Sn, 'Sm");
- }
- }
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vcvt.%s q%d, q%d", suffix, Vd, Vm);
+ }
+ } else if (op0 && op1 == 0b11 && op2 == 0b10) {
+ // VTBL, VTBX
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vn = instr->VFPNRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ int len = instr->Bits(9, 8);
+ NeonListOperand list(DwVfpRegister::from_code(Vn), len + 1);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s d%d, ",
+ instr->Bit(6) == 0 ? "vtbl.8" : "vtbx.8", Vd);
+ FormatNeonList(Vn, list.type());
+ Print(", ");
+ PrintDRegister(Vm);
+ } else if (op0 && op1 == 0b11 && op2 == 0b11) {
+ // Advanced SIMD duplicate (scalar)
+ if (instr->Bits(9, 7) == 0) {
+ // VDUP (scalar)
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ int imm4 = instr->Bits(19, 16);
+ int esize = 0, index = 0;
+ if ((imm4 & 0x1) != 0) {
+ esize = 8;
+ index = imm4 >> 1;
+ } else if ((imm4 & 0x2) != 0) {
+ esize = 16;
+ index = imm4 >> 2;
} else {
- Unknown(instr);
+ esize = 32;
+ index = imm4 >> 3;
}
- break;
- case 0x1C:
- if ((instr->Bits(11, 9) == 0x5) && (instr->Bit(6) == 0) &&
- (instr->Bit(4) == 0)) {
- // VSEL* (floating-point)
- bool dp_operation = (instr->SzValue() == 1);
- switch (instr->Bits(21, 20)) {
- case 0x0:
- if (dp_operation) {
- Format(instr, "vseleq.f64 'Dd, 'Dn, 'Dm");
- } else {
- Format(instr, "vseleq.f32 'Sd, 'Sn, 'Sm");
- }
- break;
- case 0x1:
- if (dp_operation) {
- Format(instr, "vselvs.f64 'Dd, 'Dn, 'Dm");
- } else {
- Format(instr, "vselvs.f32 'Sd, 'Sn, 'Sm");
- }
- break;
- case 0x2:
- if (dp_operation) {
- Format(instr, "vselge.f64 'Dd, 'Dn, 'Dm");
- } else {
- Format(instr, "vselge.f32 'Sd, 'Sn, 'Sm");
- }
- break;
- case 0x3:
- if (dp_operation) {
- Format(instr, "vselgt.f64 'Dd, 'Dn, 'Dm");
- } else {
- Format(instr, "vselgt.f32 'Sd, 'Sn, 'Sm");
- }
- break;
- default:
- UNREACHABLE(); // Case analysis is exhaustive.
- break;
- }
+ if (instr->Bit(6) == 0) {
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vdup.%i d%d, d%d[%d]",
+ esize, Vd, Vm, index);
} else {
- Unknown(instr);
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vdup.%i q%d, d%d[%d]",
+ esize, Vd, Vm, index);
}
- break;
- default:
+ } else {
Unknown(instr);
- break;
+ }
+ } else if (op1 != 0b11 && !op3) {
+ // Advanced SIMD three registers of different lengths.
+ int u = instr->Bit(24);
+ int opc = instr->Bits(11, 8);
+ if (opc == 0b1000) {
+ // vmlal.u<esize> <Qd>, <Dn>, <Dm>
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ int esize = 8 << instr->Bits(21, 20);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmlal.u%d q%d, d%d, d%d", esize, Vd, Vn, Vm);
+ } else if (opc == 0b1100) {
+ // vmull.s/u<esize> Qd, Dn, Dm
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ int esize = 8 << instr->Bits(21, 20);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vmull.%s%d q%d, d%d, d%d",
+ u ? "u" : "s", esize, Vd, Vn, Vm);
+ }
+ } else if (op1 != 0b11 && op3) {
+ // The instructions specified by this encoding are not used in V8.
+ Unknown(instr);
+ } else {
+ Unknown(instr);
+ }
+}
+
+void Decoder::DecodeMemoryHintsAndBarriers(Instruction* instr) {
+ int op0 = instr->Bits(25, 21);
+ if (op0 == 0b01011) {
+ // Barriers.
+ int option = instr->Bits(3, 0);
+ switch (instr->Bits(7, 4)) {
+ case 4:
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "dsb %s",
+ barrier_option_names[option]);
+ break;
+ case 5:
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "dmb %s",
+ barrier_option_names[option]);
+ break;
+ case 6:
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "isb %s",
+ barrier_option_names[option]);
+ break;
+ default:
+ Unknown(instr);
+ }
+ } else if ((op0 & 0b10001) == 0b00000 && !instr->Bit(4)) {
+ // Preload (immediate).
+ const char* rn_name = converter_.NameOfCPURegister(instr->Bits(19, 16));
+ int offset = instr->Bits(11, 0);
+ if (offset == 0) {
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "pld [%s]", rn_name);
+ } else if (instr->Bit(23) == 0) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [%s, #-%d]", rn_name, offset);
+ } else {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [%s, #+%d]", rn_name, offset);
+ }
+ } else {
+ Unknown(instr);
+ }
+}
+
+void Decoder::DecodeAdvancedSIMDElementOrStructureLoadStore(
+ Instruction* instr) {
+ int op0 = instr->Bit(23);
+ int op1 = instr->Bits(11, 10);
+ int l = instr->Bit(21);
+ int n = instr->Bits(9, 8);
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Rn = instr->VnValue();
+ int Rm = instr->VmValue();
+
+ if (op0 == 0) {
+ // Advanced SIMD load/store multiple structures.
+ int itype = instr->Bits(11, 8);
+ if (itype == 0b0010) {
+ // vld1/vst1
+ int size = instr->Bits(7, 6);
+ int align = instr->Bits(5, 4);
+ const char* op = l ? "vld1.%d " : "vst1.%d ";
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, op, (1 << size) << 3);
+ FormatNeonList(Vd, itype);
+ Print(", ");
+ FormatNeonMemory(Rn, align, Rm);
+ } else {
+ Unknown(instr);
+ }
+ } else if (op1 == 0b11) {
+ // Advanced SIMD load single structure to all lanes.
+ if (l && n == 0b00) {
+ // vld1r(replicate) single element to all lanes.
+ int size = instr->Bits(7, 6);
+ DCHECK_NE(0b11, size);
+ int type = instr->Bit(5) ? nlt_2 : nlt_1;
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vld1.%d ", (1 << size) << 3);
+ FormatNeonList(Vd, type);
+ DCHECK_EQ(0, instr->Bit(4)); // Alignment not supported.
+ Print(", ");
+ FormatNeonMemory(Rn, 0, Rm);
+ } else {
+ Unknown(instr);
+ }
+ } else if (op1 != 0b11) {
+ // Advanced SIMD load/store single structure to one lane.
+ int size = op1; // size and op1 occupy the same bits in decoding.
+ if (l && n == 0b00) {
+ // VLD1 (single element to one lane) - A1, A2, A3
+ int index_align = instr->Bits(7, 4);
+ int index = index_align >> (size + 1);
+ // Omit alignment.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vld1.%d {d%d[%d]}",
+ (1 << size) << 3, Vd, index);
+ Print(", ");
+ FormatNeonMemory(Rn, 0, Rm);
+ } else {
+ Unknown(instr);
+ }
+ } else {
+ Unknown(instr);
}
}
diff --git a/deps/v8/src/diagnostics/arm/unwinder-arm.cc b/deps/v8/src/diagnostics/arm/unwinder-arm.cc
new file mode 100644
index 0000000000..171a258a0c
--- /dev/null
+++ b/deps/v8/src/diagnostics/arm/unwinder-arm.cc
@@ -0,0 +1,37 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8-unwinder-state.h"
+#include "src/diagnostics/unwinder.h"
+#include "src/execution/frame-constants.h"
+
+namespace v8 {
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {
+ const i::Address base_addr =
+ reinterpret_cast<i::Address>(fp) +
+ i::EntryFrameConstants::kDirectCallerRRegistersOffset;
+
+ if (!register_state->callee_saved) {
+ register_state->callee_saved = std::make_unique<CalleeSavedRegisters>();
+ }
+
+ register_state->callee_saved->arm_r4 =
+ reinterpret_cast<void*>(Load(base_addr + 0 * i::kSystemPointerSize));
+ register_state->callee_saved->arm_r5 =
+ reinterpret_cast<void*>(Load(base_addr + 1 * i::kSystemPointerSize));
+ register_state->callee_saved->arm_r6 =
+ reinterpret_cast<void*>(Load(base_addr + 2 * i::kSystemPointerSize));
+ register_state->callee_saved->arm_r7 =
+ reinterpret_cast<void*>(Load(base_addr + 3 * i::kSystemPointerSize));
+ register_state->callee_saved->arm_r8 =
+ reinterpret_cast<void*>(Load(base_addr + 4 * i::kSystemPointerSize));
+ register_state->callee_saved->arm_r9 =
+ reinterpret_cast<void*>(Load(base_addr + 5 * i::kSystemPointerSize));
+ register_state->callee_saved->arm_r10 =
+ reinterpret_cast<void*>(Load(base_addr + 6 * i::kSystemPointerSize));
+}
+
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/arm64/unwinder-arm64.cc b/deps/v8/src/diagnostics/arm64/unwinder-arm64.cc
new file mode 100644
index 0000000000..5a92512a17
--- /dev/null
+++ b/deps/v8/src/diagnostics/arm64/unwinder-arm64.cc
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/unwinder.h"
+
+namespace v8 {
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/basic-block-profiler.cc b/deps/v8/src/diagnostics/basic-block-profiler.cc
index 95e2cb8dae..22ba4c6da1 100644
--- a/deps/v8/src/diagnostics/basic-block-profiler.cc
+++ b/deps/v8/src/diagnostics/basic-block-profiler.cc
@@ -10,7 +10,7 @@
#include "src/base/lazy-instance.h"
#include "src/heap/heap-inl.h"
-#include "torque-generated/exported-class-definitions-inl.h"
+#include "src/objects/shared-function-info-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/diagnostics/basic-block-profiler.h b/deps/v8/src/diagnostics/basic-block-profiler.h
index 41d0e65ccb..706505939b 100644
--- a/deps/v8/src/diagnostics/basic-block-profiler.h
+++ b/deps/v8/src/diagnostics/basic-block-profiler.h
@@ -14,11 +14,13 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
-#include "torque-generated/exported-class-definitions.h"
+#include "src/objects/shared-function-info.h"
namespace v8 {
namespace internal {
+class OnHeapBasicBlockProfilerData;
+
class BasicBlockProfilerData {
public:
explicit BasicBlockProfilerData(size_t n_blocks);
diff --git a/deps/v8/src/diagnostics/disassembler.cc b/deps/v8/src/diagnostics/disassembler.cc
index 8c7cab195b..a26a4134c2 100644
--- a/deps/v8/src/diagnostics/disassembler.cc
+++ b/deps/v8/src/diagnostics/disassembler.cc
@@ -253,8 +253,7 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
host.as_wasm_code()->native_module()->GetRuntimeStubId(
relocinfo->wasm_stub_call_address()));
out->AddFormatted(" ;; wasm stub: %s", runtime_stub_name);
- } else if (RelocInfo::IsRuntimeEntry(rmode) && isolate &&
- isolate->deoptimizer_data() != nullptr) {
+ } else if (RelocInfo::IsRuntimeEntry(rmode) && isolate != nullptr) {
// A runtime entry relocinfo might be a deoptimization bailout.
Address addr = relocinfo->target_address();
DeoptimizeKind type;
@@ -426,6 +425,8 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
int Disassembler::Decode(Isolate* isolate, std::ostream* os, byte* begin,
byte* end, CodeReference code, Address current_pc) {
+ DCHECK_WITH_MSG(FLAG_text_is_readable,
+ "Builtins disassembly requires a readable .text section");
V8NameConverter v8NameConverter(isolate, code);
if (isolate) {
// We have an isolate, so support external reference names.
diff --git a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
index 80ab5663aa..3dbde536de 100644
--- a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -2152,37 +2152,21 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
} else if (*data == 0x3A) {
data++;
- if (*data == 0x08) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("roundps %s,%s,%d", NameOfXMMRegister(regop),
- NameOfXMMRegister(rm), static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x09) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("roundpd %s,%s,%d", NameOfXMMRegister(regop),
- NameOfXMMRegister(rm), static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x0A) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("roundss %s,%s,%d", NameOfXMMRegister(regop),
- NameOfXMMRegister(rm), static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x0B) {
+ if (*data >= 0x08 && *data <= 0x0B) {
+ const char* const pseudo_op[] = {
+ "roundps",
+ "roundpd",
+ "roundss",
+ "roundsd",
+ };
+ byte op = *data;
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("roundsd %s,%s,%d", NameOfXMMRegister(regop),
- NameOfXMMRegister(rm), static_cast<int>(imm8));
+ AppendToBuffer("%s %s,%s,%d", pseudo_op[op - 0x08],
+ NameOfXMMRegister(regop), NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
data += 2;
} else if (*data == 0x0E) {
data++;
diff --git a/deps/v8/src/diagnostics/ia32/unwinder-ia32.cc b/deps/v8/src/diagnostics/ia32/unwinder-ia32.cc
new file mode 100644
index 0000000000..5a92512a17
--- /dev/null
+++ b/deps/v8/src/diagnostics/ia32/unwinder-ia32.cc
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/unwinder.h"
+
+namespace v8 {
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/mips/unwinder-mips.cc b/deps/v8/src/diagnostics/mips/unwinder-mips.cc
new file mode 100644
index 0000000000..5a92512a17
--- /dev/null
+++ b/deps/v8/src/diagnostics/mips/unwinder-mips.cc
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/unwinder.h"
+
+namespace v8 {
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/mips64/unwinder-mips64.cc b/deps/v8/src/diagnostics/mips64/unwinder-mips64.cc
new file mode 100644
index 0000000000..5a92512a17
--- /dev/null
+++ b/deps/v8/src/diagnostics/mips64/unwinder-mips64.cc
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/unwinder.h"
+
+namespace v8 {
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index 83a1ac3a9f..6ee2d39f45 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -27,6 +27,7 @@
#include "src/objects/free-space-inl.h"
#include "src/objects/function-kind.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/instance-type.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/layout-descriptor.h"
#include "src/objects/objects-inl.h"
@@ -66,14 +67,15 @@
#include "src/objects/property-descriptor-object-inl.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/struct-inl.h"
+#include "src/objects/synthetic-module-inl.h"
#include "src/objects/template-objects-inl.h"
+#include "src/objects/torque-defined-classes-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/regexp/regexp.h"
#include "src/utils/ostreams.h"
#include "src/wasm/wasm-objects-inl.h"
#include "torque-generated/class-verifiers.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/internal-class-definitions-inl.h"
+#include "torque-generated/runtime-macros.h"
namespace v8 {
namespace internal {
@@ -291,9 +293,11 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
CHECK(IsBytecodeArray(isolate));
CHECK(constant_pool(isolate).IsFixedArray(isolate));
VerifyHeapPointer(isolate, constant_pool(isolate));
- CHECK(synchronized_source_position_table(isolate).IsUndefined(isolate) ||
- synchronized_source_position_table(isolate).IsException(isolate) ||
- synchronized_source_position_table(isolate).IsByteArray(isolate));
+ {
+ Object table = source_position_table(isolate, kAcquireLoad);
+ CHECK(table.IsUndefined(isolate) || table.IsException(isolate) ||
+ table.IsByteArray(isolate));
+ }
CHECK(handler_table(isolate).IsByteArray(isolate));
for (int i = 0; i < constant_pool(isolate).length(); ++i) {
// No ThinStrings in the constant pool.
@@ -303,7 +307,7 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
USE_TORQUE_VERIFIER(JSReceiver)
-bool JSObject::ElementsAreSafeToExamine(const Isolate* isolate) const {
+bool JSObject::ElementsAreSafeToExamine(IsolateRoot isolate) const {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
return elements(isolate) !=
@@ -371,7 +375,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
int delta = actual_unused_property_fields - map().UnusedPropertyFields();
CHECK_EQ(0, delta % JSObject::kFieldsAdded);
}
- DescriptorArray descriptors = map().instance_descriptors();
+ DescriptorArray descriptors = map().instance_descriptors(kRelaxedLoad);
bool is_transitionable_fast_elements_kind =
IsTransitionableFastElementsKind(map().elements_kind());
@@ -445,13 +449,13 @@ void Map::MapVerify(Isolate* isolate) {
// Root maps must not have descriptors in the descriptor array that do not
// belong to the map.
CHECK_EQ(NumberOfOwnDescriptors(),
- instance_descriptors().number_of_descriptors());
+ instance_descriptors(kRelaxedLoad).number_of_descriptors());
} else {
// If there is a parent map it must be non-stable.
Map parent = Map::cast(GetBackPointer());
CHECK(!parent.is_stable());
- DescriptorArray descriptors = instance_descriptors();
- if (descriptors == parent.instance_descriptors()) {
+ DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
+ if (descriptors == parent.instance_descriptors(kRelaxedLoad)) {
if (NumberOfOwnDescriptors() == parent.NumberOfOwnDescriptors() + 1) {
// Descriptors sharing through property transitions takes over
// ownership from the parent map.
@@ -469,14 +473,14 @@ void Map::MapVerify(Isolate* isolate) {
}
}
}
- SLOW_DCHECK(instance_descriptors().IsSortedNoDuplicates());
+ SLOW_DCHECK(instance_descriptors(kRelaxedLoad).IsSortedNoDuplicates());
DisallowHeapAllocation no_gc;
SLOW_DCHECK(
TransitionsAccessor(isolate, *this, &no_gc).IsSortedNoDuplicates());
SLOW_DCHECK(TransitionsAccessor(isolate, *this, &no_gc)
.IsConsistentWithBackPointers());
SLOW_DCHECK(!FLAG_unbox_double_fields ||
- layout_descriptor().IsConsistentWithMap(*this));
+ layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
// Only JSFunction maps have has_prototype_slot() bit set and constructible
// JSFunction objects must have prototype slot.
CHECK_IMPLIES(has_prototype_slot(), instance_type() == JS_FUNCTION_TYPE);
@@ -484,7 +488,7 @@ void Map::MapVerify(Isolate* isolate) {
CHECK(!has_named_interceptor());
CHECK(!is_dictionary_map());
CHECK(!is_access_check_needed());
- DescriptorArray const descriptors = instance_descriptors();
+ DescriptorArray const descriptors = instance_descriptors(kRelaxedLoad);
for (InternalIndex i : IterateOwnDescriptors()) {
CHECK(!descriptors.GetKey(i).IsInterestingSymbol());
}
@@ -508,7 +512,7 @@ void Map::DictionaryMapVerify(Isolate* isolate) {
CHECK(is_dictionary_map());
CHECK_EQ(kInvalidEnumCacheSentinel, EnumLength());
CHECK_EQ(ReadOnlyRoots(isolate).empty_descriptor_array(),
- instance_descriptors());
+ instance_descriptors(kRelaxedLoad));
CHECK_EQ(0, UnusedPropertyFields());
CHECK_EQ(Map::GetVisitorId(*this), visitor_id());
}
@@ -574,7 +578,7 @@ void NativeContext::NativeContextVerify(Isolate* isolate) {
}
void FeedbackMetadata::FeedbackMetadataVerify(Isolate* isolate) {
- if (slot_count() == 0 && closure_feedback_cell_count() == 0) {
+ if (slot_count() == 0 && create_closure_slot_count() == 0) {
CHECK_EQ(ReadOnlyRoots(isolate).empty_feedback_metadata(), *this);
} else {
FeedbackMetadataIterator iter(*this);
@@ -820,7 +824,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(LocalIsolate* isolate) {
}
void SharedFunctionInfo::SharedFunctionInfoVerify(ReadOnlyRoots roots) {
- Object value = name_or_scope_info();
+ Object value = name_or_scope_info(kAcquireLoad);
if (value.IsScopeInfo()) {
CHECK_LT(0, ScopeInfo::cast(value).length());
CHECK_NE(value, roots.empty_scope_info());
@@ -832,8 +836,11 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(ReadOnlyRoots roots) {
HasUncompiledDataWithoutPreparseData() || HasWasmJSFunctionData() ||
HasWasmCapiFunctionData());
- CHECK(script_or_debug_info().IsUndefined(roots) ||
- script_or_debug_info().IsScript() || HasDebugInfo());
+ {
+ auto script = script_or_debug_info(kAcquireLoad);
+ CHECK(script.IsUndefined(roots) || script.IsScript() ||
+ script.IsDebugInfo());
+ }
if (!is_compiled()) {
CHECK(!HasFeedbackMetadata());
@@ -865,11 +872,6 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(ReadOnlyRoots roots) {
CHECK(!construct_as_builtin());
}
}
-
- // At this point we only support skipping arguments adaptor frames
- // for strict mode functions (see https://crbug.com/v8/8895).
- CHECK_IMPLIES(is_safe_to_skip_arguments_adaptor(),
- language_mode() == LanguageMode::kStrict);
}
void JSGlobalProxy::JSGlobalProxyVerify(Isolate* isolate) {
@@ -944,13 +946,16 @@ void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
}
void Code::CodeVerify(Isolate* isolate) {
- CHECK_IMPLIES(
- has_safepoint_table(),
- IsAligned(safepoint_table_offset(), static_cast<unsigned>(kIntSize)));
+ CHECK(IsAligned(InstructionSize(),
+ static_cast<unsigned>(Code::kMetadataAlignment)));
+ CHECK_EQ(safepoint_table_offset(), 0);
CHECK_LE(safepoint_table_offset(), handler_table_offset());
CHECK_LE(handler_table_offset(), constant_pool_offset());
CHECK_LE(constant_pool_offset(), code_comments_offset());
- CHECK_LE(code_comments_offset(), InstructionSize());
+ CHECK_LE(code_comments_offset(), unwinding_info_offset());
+ CHECK_LE(unwinding_info_offset(), MetadataSize());
+ CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
+ IsAligned(InstructionStart(), kCodeAlignment));
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
IsAligned(raw_instruction_start(), kCodeAlignment));
// TODO(delphick): Refactor Factory::CodeBuilder::BuildInternal, so that the
@@ -959,7 +964,8 @@ void Code::CodeVerify(Isolate* isolate) {
// everything is set up.
// CHECK_EQ(ReadOnlyHeap::Contains(*this), !IsExecutable());
relocation_info().ObjectVerify(isolate);
- CHECK(Code::SizeFor(body_size()) <= kMaxRegularHeapObjectSize ||
+ CHECK(V8_ENABLE_THIRD_PARTY_HEAP_BOOL ||
+ CodeSize() <= MemoryChunkLayout::MaxRegularCodeObjectSize() ||
isolate->heap()->InSpace(*this, CODE_LO_SPACE));
Address last_gc_pc = kNullAddress;
@@ -1174,6 +1180,7 @@ void SmallOrderedHashTable<Derived>::SmallOrderedHashTableVerify(
}
}
void SmallOrderedHashMap::SmallOrderedHashMapVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::SmallOrderedHashMapVerify(*this, isolate);
SmallOrderedHashTable<SmallOrderedHashMap>::SmallOrderedHashTableVerify(
isolate);
for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
@@ -1186,6 +1193,7 @@ void SmallOrderedHashMap::SmallOrderedHashMapVerify(Isolate* isolate) {
}
void SmallOrderedHashSet::SmallOrderedHashSetVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::SmallOrderedHashSetVerify(*this, isolate);
SmallOrderedHashTable<SmallOrderedHashSet>::SmallOrderedHashTableVerify(
isolate);
for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
@@ -1199,6 +1207,8 @@ void SmallOrderedHashSet::SmallOrderedHashSetVerify(Isolate* isolate) {
void SmallOrderedNameDictionary::SmallOrderedNameDictionaryVerify(
Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::SmallOrderedNameDictionaryVerify(*this,
+ isolate);
SmallOrderedHashTable<
SmallOrderedNameDictionary>::SmallOrderedHashTableVerify(isolate);
for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
@@ -1375,6 +1385,17 @@ void Module::ModuleVerify(Isolate* isolate) {
CHECK_NE(hash(), 0);
}
+void ModuleRequest::ModuleRequestVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::ModuleRequestVerify(*this, isolate);
+ CHECK_EQ(0, import_assertions().length() % 3);
+
+ for (int i = 0; i < import_assertions().length(); i += 3) {
+ CHECK(import_assertions().get(i).IsString()); // Assertion key
+ CHECK(import_assertions().get(i + 1).IsString()); // Assertion value
+ CHECK(import_assertions().get(i + 2).IsSmi()); // Assertion location
+ }
+}
+
void SourceTextModule::SourceTextModuleVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::SourceTextModuleVerify(*this, isolate);
@@ -1540,8 +1561,6 @@ void CallHandlerInfo::CallHandlerInfoVerify(Isolate* isolate) {
.next_call_side_effect_free_call_handler_info_map());
}
-USE_TORQUE_VERIFIER(WasmCapiFunctionData)
-
USE_TORQUE_VERIFIER(WasmJSFunctionData)
USE_TORQUE_VERIFIER(WasmIndirectFunctionTable)
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index 9afe8e9445..d65c0eeb4b 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -5,73 +5,22 @@
#include <iomanip>
#include <memory>
+#include "src/common/globals.h"
+#include "src/compiler/node.h"
#include "src/diagnostics/disasm.h"
#include "src/diagnostics/disassembler.h"
#include "src/heap/heap-inl.h" // For InOldSpace.
#include "src/heap/heap-write-barrier-inl.h" // For GetIsolateFromWritableObj.
#include "src/init/bootstrapper.h"
#include "src/interpreter/bytecodes.h"
-#include "src/objects/arguments-inl.h"
-#include "src/objects/cell-inl.h"
-#include "src/objects/data-handler-inl.h"
-#include "src/objects/debug-objects-inl.h"
-#include "src/objects/embedder-data-array-inl.h"
-#include "src/objects/embedder-data-slot-inl.h"
-#include "src/objects/feedback-cell-inl.h"
-#include "src/objects/foreign-inl.h"
-#include "src/objects/free-space-inl.h"
-#include "src/objects/hash-table-inl.h"
-#include "src/objects/heap-number-inl.h"
-#include "src/objects/js-array-buffer-inl.h"
-#include "src/objects/js-array-inl.h"
-#include "src/objects/objects-inl.h"
-#include "src/objects/objects.h"
-#include "src/snapshot/embedded/embedded-data.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-break-iterator-inl.h"
-#include "src/objects/js-collator-inl.h"
-#endif // V8_INTL_SUPPORT
-#include "src/objects/js-collection-inl.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-date-time-format-inl.h"
-#include "src/objects/js-display-names-inl.h"
-#endif // V8_INTL_SUPPORT
-#include "src/objects/js-generator-inl.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-list-format-inl.h"
-#include "src/objects/js-locale-inl.h"
-#include "src/objects/js-number-format-inl.h"
-#include "src/objects/js-plural-rules-inl.h"
-#endif // V8_INTL_SUPPORT
-#include "src/objects/js-regexp-inl.h"
-#include "src/objects/js-regexp-string-iterator-inl.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-relative-time-format-inl.h"
-#include "src/objects/js-segment-iterator-inl.h"
-#include "src/objects/js-segmenter-inl.h"
-#include "src/objects/js-segments-inl.h"
-#endif // V8_INTL_SUPPORT
-#include "src/compiler/node.h"
-#include "src/objects/js-weak-refs-inl.h"
-#include "src/objects/literal-objects-inl.h"
-#include "src/objects/microtask-inl.h"
-#include "src/objects/module-inl.h"
-#include "src/objects/oddball-inl.h"
-#include "src/objects/promise-inl.h"
-#include "src/objects/property-descriptor-object-inl.h"
-#include "src/objects/stack-frame-info-inl.h"
-#include "src/objects/string-set-inl.h"
-#include "src/objects/struct-inl.h"
-#include "src/objects/template-objects-inl.h"
-#include "src/objects/transitions-inl.h"
+#include "src/objects/all-objects-inl.h"
+#include "src/objects/code-kind.h"
#include "src/regexp/regexp.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/utils/ostreams.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "torque-generated/class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/internal-class-definitions-inl.h"
namespace v8 {
namespace internal {
@@ -284,7 +233,7 @@ void FreeSpace::FreeSpacePrint(std::ostream& os) { // NOLINT
bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
if (HasFastProperties()) {
- DescriptorArray descs = map().instance_descriptors();
+ DescriptorArray descs = map().instance_descriptors(kRelaxedLoad);
int nof_inobject_properties = map().GetInObjectProperties();
for (InternalIndex i : map().IterateOwnDescriptors()) {
os << "\n ";
@@ -307,16 +256,23 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
}
os << " ";
details.PrintAsFastTo(os, PropertyDetails::kForProperties);
- if (details.location() != kField) continue;
- int field_index = details.field_index();
- if (nof_inobject_properties <= field_index) {
- field_index -= nof_inobject_properties;
- os << " properties[" << field_index << "]";
+ if (details.location() == kField) {
+ int field_index = details.field_index();
+ if (field_index < nof_inobject_properties) {
+ os << ", location: in-object";
+ } else {
+ field_index -= nof_inobject_properties;
+ os << ", location: properties[" << field_index << "]";
+ }
+ } else {
+ os << ", location: descriptor";
}
}
return map().NumberOfOwnDescriptors() > 0;
} else if (IsJSGlobalObject()) {
JSGlobalObject::cast(*this).global_dictionary().Print(os);
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ property_dictionary_ordered().Print(os);
} else {
property_dictionary().Print(os);
}
@@ -468,7 +424,7 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
}
}
-void PrintEmbedderData(const Isolate* isolate, std::ostream& os,
+void PrintEmbedderData(IsolateRoot isolate, std::ostream& os,
EmbedderDataSlot slot) {
DisallowHeapAllocation no_gc;
Object value = slot.load_tagged();
@@ -568,9 +524,10 @@ static void JSObjectPrintBody(std::ostream& os,
if (!properties_or_hash.IsSmi()) {
os << Brief(properties_or_hash);
}
- os << " {";
+ os << "\n - All own properties (excluding elements): {";
if (obj.PrintProperties(os)) os << "\n ";
os << "}\n";
+
if (print_elements) {
size_t length = obj.IsJSTypedArray() ? JSTypedArray::cast(obj).length()
: obj.elements().length();
@@ -578,7 +535,7 @@ static void JSObjectPrintBody(std::ostream& os,
}
int embedder_fields = obj.GetEmbedderFieldCount();
if (embedder_fields > 0) {
- const Isolate* isolate = GetIsolateForPtrCompr(obj);
+ IsolateRoot isolate = GetIsolateForPtrCompr(obj);
os << " - embedder fields = {";
for (int i = 0; i < embedder_fields; i++) {
os << "\n ";
@@ -772,7 +729,7 @@ void PrintWeakArrayElements(std::ostream& os, T* array) {
} // namespace
void EmbedderDataArray::EmbedderDataArrayPrint(std::ostream& os) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
PrintHeader(os, "EmbedderDataArray");
os << "\n - length: " << length();
EmbedderDataSlot start(*this, 0);
@@ -888,14 +845,13 @@ void FeedbackVectorSpec::Print() {
}
void FeedbackVectorSpec::FeedbackVectorSpecPrint(std::ostream& os) { // NOLINT
- int slot_count = slots();
- os << " - slot_count: " << slot_count;
- if (slot_count == 0) {
+ os << " - slot_count: " << slot_count();
+ if (slot_count() == 0) {
os << " (empty)\n";
return;
}
- for (int slot = 0; slot < slot_count;) {
+ for (int slot = 0; slot < slot_count();) {
FeedbackSlotKind kind = GetKind(FeedbackSlot(slot));
int entry_size = FeedbackMetadata::GetSlotSize(kind);
DCHECK_LT(0, entry_size);
@@ -908,6 +864,7 @@ void FeedbackVectorSpec::FeedbackVectorSpecPrint(std::ostream& os) { // NOLINT
void FeedbackMetadata::FeedbackMetadataPrint(std::ostream& os) {
PrintHeader(os, "FeedbackMetadata");
os << "\n - slot_count: " << slot_count();
+ os << "\n - create_closure_slot_count: " << create_closure_slot_count();
FeedbackMetadataIterator iter(*this);
while (iter.HasNext()) {
@@ -931,12 +888,13 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
}
os << "\n - shared function info: " << Brief(shared_function_info());
- os << "\n - optimized code/marker: ";
if (has_optimized_code()) {
- os << Brief(optimized_code());
+ os << "\n - optimized code: " << Brief(optimized_code());
} else {
- os << optimization_marker();
+ os << "\n - no optimized code";
}
+ os << "\n - optimization marker: " << optimization_marker();
+ os << "\n - optimization tier: " << optimization_tier();
os << "\n - invocation count: " << invocation_count();
os << "\n - profiler ticks: " << profiler_ticks();
@@ -1279,13 +1237,12 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << "\n - formal_parameter_count: "
<< shared().internal_formal_parameter_count();
- if (shared().is_safe_to_skip_arguments_adaptor()) {
- os << "\n - safe_to_skip_arguments_adaptor";
- }
os << "\n - kind: " << shared().kind();
os << "\n - context: " << Brief(context());
os << "\n - code: " << Brief(code());
- if (ActiveTierIsIgnition()) {
+ if (code().kind() == CodeKind::FOR_TESTING) {
+ os << "\n - FOR_TESTING";
+ } else if (ActiveTierIsIgnition()) {
os << "\n - interpreted";
if (shared().HasBytecodeArray()) {
os << "\n - bytecode: " << shared().GetBytecodeArray();
@@ -1364,12 +1321,9 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
}
os << "\n - function_map_index: " << function_map_index();
os << "\n - formal_parameter_count: " << internal_formal_parameter_count();
- if (is_safe_to_skip_arguments_adaptor()) {
- os << "\n - safe_to_skip_arguments_adaptor";
- }
os << "\n - expected_nof_properties: " << expected_nof_properties();
os << "\n - language_mode: " << language_mode();
- os << "\n - data: " << Brief(function_data());
+ os << "\n - data: " << Brief(function_data(kAcquireLoad));
os << "\n - code (from data): ";
os << Brief(GetCode());
PrintSourceCode(os);
@@ -1913,7 +1867,7 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(
os << "\n - tag: " << tag();
os << "\n - serial_number: " << serial_number();
os << "\n - property_list: " << Brief(property_list());
- os << "\n - call_code: " << Brief(call_code());
+ os << "\n - call_code: " << Brief(call_code(kAcquireLoad));
os << "\n - property_accessors: " << Brief(property_accessors());
os << "\n - signature: " << Brief(signature());
os << "\n - cached_property_name: " << Brief(cached_property_name());
@@ -1924,16 +1878,6 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(
os << "\n";
}
-void WasmCapiFunctionData::WasmCapiFunctionDataPrint(
- std::ostream& os) { // NOLINT
- PrintHeader(os, "WasmCapiFunctionData");
- os << "\n - call_target: " << call_target();
- os << "\n - embedder_data: " << Brief(embedder_data());
- os << "\n - wrapper_code: " << Brief(wrapper_code());
- os << "\n - serialized_signature: " << Brief(serialized_signature());
- os << "\n";
-}
-
void WasmIndirectFunctionTable::WasmIndirectFunctionTablePrint(
std::ostream& os) {
PrintHeader(os, "WasmIndirectFunctionTable");
@@ -2393,7 +2337,7 @@ int Name::NameShortPrint(Vector<char> str) {
void Map::PrintMapDetails(std::ostream& os) {
DisallowHeapAllocation no_gc;
this->MapPrint(os);
- instance_descriptors().PrintDescriptors(os);
+ instance_descriptors(kRelaxedLoad).PrintDescriptors(os);
}
void Map::MapPrint(std::ostream& os) { // NOLINT
@@ -2447,10 +2391,10 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
os << "\n - prototype_validity cell: " << Brief(prototype_validity_cell());
os << "\n - instance descriptors " << (owns_descriptors() ? "(own) " : "")
<< "#" << NumberOfOwnDescriptors() << ": "
- << Brief(instance_descriptors());
+ << Brief(instance_descriptors(kRelaxedLoad));
if (FLAG_unbox_double_fields) {
os << "\n - layout descriptor: ";
- layout_descriptor().ShortPrint(os);
+ layout_descriptor(kAcquireLoad).ShortPrint(os);
}
// Read-only maps can't have transitions, which is fortunate because we need
@@ -2563,7 +2507,7 @@ void TransitionsAccessor::PrintOneTransition(std::ostream& os, Name key,
DCHECK(!IsSpecialTransition(roots, key));
os << "(transition to ";
InternalIndex descriptor = target.LastAdded();
- DescriptorArray descriptors = target.instance_descriptors();
+ DescriptorArray descriptors = target.instance_descriptors(kRelaxedLoad);
descriptors.PrintDescriptorDetails(os, descriptor,
PropertyDetails::kForTransitions);
os << ")";
@@ -2641,7 +2585,7 @@ void TransitionsAccessor::PrintTransitionTree(std::ostream& os, int level,
DCHECK(!IsSpecialTransition(ReadOnlyRoots(isolate_), key));
os << "to ";
InternalIndex descriptor = target.LastAdded();
- DescriptorArray descriptors = target.instance_descriptors();
+ DescriptorArray descriptors = target.instance_descriptors(kRelaxedLoad);
descriptors.PrintDescriptorDetails(os, descriptor,
PropertyDetails::kForTransitions);
}
diff --git a/deps/v8/src/diagnostics/perf-jit.cc b/deps/v8/src/diagnostics/perf-jit.cc
index 3efdcc08db..4b83325f1a 100644
--- a/deps/v8/src/diagnostics/perf-jit.cc
+++ b/deps/v8/src/diagnostics/perf-jit.cc
@@ -205,7 +205,9 @@ void PerfJitLogger::LogRecordedBuffer(
int length) {
if (FLAG_perf_basic_prof_only_functions &&
(abstract_code->kind() != CodeKind::INTERPRETED_FUNCTION &&
- abstract_code->kind() != CodeKind::OPTIMIZED_FUNCTION)) {
+ abstract_code->kind() != CodeKind::TURBOFAN &&
+ abstract_code->kind() != CodeKind::NATIVE_CONTEXT_INDEPENDENT &&
+ abstract_code->kind() != CodeKind::TURBOPROP)) {
return;
}
@@ -231,14 +233,11 @@ void PerfJitLogger::LogRecordedBuffer(
const char* code_name = name;
uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->InstructionStart());
- // Code generated by Turbofan will have the safepoint table directly after
- // instructions. There is no need to record the safepoint table itself.
- uint32_t code_size = code->ExecutableInstructionSize();
-
// Unwinding info comes right after debug info.
if (FLAG_perf_prof_unwinding_info) LogWriteUnwindingInfo(*code);
- WriteJitCodeLoadEntry(code_pointer, code_size, code_name, length);
+ WriteJitCodeLoadEntry(code_pointer, code->InstructionSize(), code_name,
+ length);
}
void PerfJitLogger::LogRecordedBuffer(const wasm::WasmCode* code,
diff --git a/deps/v8/src/diagnostics/ppc/unwinder-ppc.cc b/deps/v8/src/diagnostics/ppc/unwinder-ppc.cc
new file mode 100644
index 0000000000..43c6acb609
--- /dev/null
+++ b/deps/v8/src/diagnostics/ppc/unwinder-ppc.cc
@@ -0,0 +1,8 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "src/diagnostics/unwinder.h"
+namespace v8 {
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/s390/unwinder-s390.cc b/deps/v8/src/diagnostics/s390/unwinder-s390.cc
new file mode 100644
index 0000000000..43c6acb609
--- /dev/null
+++ b/deps/v8/src/diagnostics/s390/unwinder-s390.cc
@@ -0,0 +1,8 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "src/diagnostics/unwinder.h"
+namespace v8 {
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/unwinder.cc b/deps/v8/src/diagnostics/unwinder.cc
index c4a559c9d9..1dd122a118 100644
--- a/deps/v8/src/diagnostics/unwinder.cc
+++ b/deps/v8/src/diagnostics/unwinder.cc
@@ -2,15 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/diagnostics/unwinder.h"
+
#include <algorithm>
-#include "include/v8.h"
-#include "src/common/globals.h"
-#include "src/execution/frame-constants.h"
#include "src/execution/pointer-authentication.h"
namespace v8 {
+// Architecture specific. Implemented in unwinder-<arch>.cc.
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state);
+
+i::Address Load(i::Address address) {
+ return *reinterpret_cast<i::Address*>(address);
+}
+
namespace {
const i::byte* CalculateEnd(const void* start, size_t length_in_bytes) {
@@ -61,13 +68,15 @@ bool IsInUnsafeJSEntryRange(const JSEntryStubs& entry_stubs, void* pc) {
// within JSEntry.
}
-i::Address Load(i::Address address) {
- return *reinterpret_cast<i::Address*>(address);
+bool AddressIsInStack(const void* address, const void* stack_base,
+ const void* stack_top) {
+ return address <= stack_base && address >= stack_top;
}
void* GetReturnAddressFromFP(void* fp, void* pc,
const JSEntryStubs& entry_stubs) {
int caller_pc_offset = i::CommonFrameConstants::kCallerPCOffset;
+// TODO(solanes): Implement the JSEntry range case also for x64 here and below.
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
if (IsInJSEntryRange(entry_stubs, pc)) {
caller_pc_offset = i::EntryFrameConstants::kDirectCallerPCOffset;
@@ -100,11 +109,6 @@ void* GetCallerSPFromFP(void* fp, void* pc, const JSEntryStubs& entry_stubs) {
caller_sp_offset);
}
-bool AddressIsInStack(const void* address, const void* stack_base,
- const void* stack_top) {
- return address <= stack_base && address >= stack_top;
-}
-
} // namespace
bool Unwinder::TryUnwindV8Frames(const JSEntryStubs& entry_stubs,
@@ -145,6 +149,10 @@ bool Unwinder::TryUnwindV8Frames(const JSEntryStubs& entry_stubs,
// Link register no longer valid after unwinding.
register_state->lr = nullptr;
+
+ if (IsInJSEntryRange(entry_stubs, pc)) {
+ GetCalleeSavedRegistersFromEntryFrame(current_fp, register_state);
+ }
return true;
}
return false;
diff --git a/deps/v8/src/diagnostics/unwinder.h b/deps/v8/src/diagnostics/unwinder.h
new file mode 100644
index 0000000000..4cad2897fd
--- /dev/null
+++ b/deps/v8/src/diagnostics/unwinder.h
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DIAGNOSTICS_UNWINDER_H_
+#define V8_DIAGNOSTICS_UNWINDER_H_
+
+#include "include/v8.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+
+i::Address Load(i::Address address);
+
+} // namespace v8
+
+#endif // V8_DIAGNOSTICS_UNWINDER_H_
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index c39adcf710..f3b9a753af 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -16,37 +16,6 @@
#error "Unsupported OS"
#endif // V8_OS_WIN_X64
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index 641db9f4e7..7ae330c3ea 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -28,7 +28,12 @@ enum OperandType {
// Fixed 8-bit operands.
BYTE_SIZE_OPERAND_FLAG = 4,
BYTE_REG_OPER_OP_ORDER = REG_OPER_OP_ORDER | BYTE_SIZE_OPERAND_FLAG,
- BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG
+ BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG,
+ // XMM registers/operands can be mixed with normal operands.
+ OPER_XMMREG_OP_ORDER,
+ XMMREG_OPER_OP_ORDER,
+ XMMREG_XMMOPER_OP_ORDER,
+ XMMOPER_XMMREG_OP_ORDER,
};
//------------------------------------------------------------------
@@ -444,6 +449,7 @@ class DisassemblerX64 {
int PrintImmediateOp(byte* data);
const char* TwoByteMnemonic(byte opcode);
int TwoByteOpcodeInstruction(byte* data);
+ int ThreeByteOpcodeInstruction(byte* data);
int F6F7Instruction(byte* data);
int ShiftInstruction(byte* data);
int JumpShort(byte* data);
@@ -622,6 +628,28 @@ int DisassemblerX64::PrintOperands(const char* mnem, OperandType op_order,
AppendToBuffer(",%s", register_name);
break;
}
+ case XMMREG_XMMOPER_OP_ORDER: {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ advance = PrintRightXMMOperand(data);
+ break;
+ }
+ case XMMOPER_XMMREG_OP_ORDER: {
+ AppendToBuffer("%s ", mnem);
+ advance = PrintRightXMMOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
+ }
+ case OPER_XMMREG_OP_ORDER: {
+ AppendToBuffer("%s ", mnem);
+ advance = PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
+ }
+ case XMMREG_OPER_OP_ORDER: {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ advance = PrintRightOperand(data);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -1019,6 +1047,13 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
+ case 0x4A: {
+ AppendToBuffer("vblendvps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
+ break;
+ }
case 0x4B: {
AppendToBuffer("vblendvpd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1026,6 +1061,13 @@ int DisassemblerX64::AVXInstruction(byte* data) {
AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
break;
}
+ case 0x4C: {
+ AppendToBuffer("vpblendvb %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
+ break;
+ }
default:
UnimplementedInstruction();
}
@@ -1335,11 +1377,32 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
- case 0x16:
- AppendToBuffer("vmovlhps %s,%s,", NameOfXMMRegister(regop),
+ case 0x12:
+ AppendToBuffer("vmovlps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x13:
+ AppendToBuffer("vmovlps ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
+ case 0x16:
+ if (mod == 0b11) {
+ AppendToBuffer("vmovlhps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ } else {
+ AppendToBuffer("vmovhps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ }
+ break;
+ case 0x17:
+ AppendToBuffer("vmovhps ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
case 0x28:
AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1805,432 +1868,271 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
// Handle all two-byte opcodes, which start with 0x0F.
// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
-// We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A.
int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
byte opcode = *(data + 1);
byte* current = data + 2;
// At return, "current" points to the start of the next instruction.
const char* mnemonic = TwoByteMnemonic(opcode);
+ // Not every instruction will use this, but it doesn't hurt to figure it out
+ // here, since it doesn't update any pointers.
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
if (operand_size_ == 0x66) {
+ // These are three-byte opcodes, see ThreeByteOpcodeInstruction.
+ DCHECK_NE(0x38, opcode);
+ DCHECK_NE(0x3A, opcode);
// 0x66 0x0F prefix.
- int mod, regop, rm;
- if (opcode == 0x38) {
- byte third_byte = *current;
- current = data + 3;
- get_modrm(*current, &mod, &regop, &rm);
- switch (third_byte) {
- case 0x15: {
- AppendToBuffer("blendvpd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",<xmm0>");
- break;
- }
-#define SSE34_DIS_CASE(instruction, notUsed1, notUsed2, notUsed3, opcode) \
- case 0x##opcode: { \
- AppendToBuffer(#instruction " %s,", NameOfXMMRegister(regop)); \
- current += PrintRightXMMOperand(current); \
- break; \
- }
-
- SSSE3_INSTRUCTION_LIST(SSE34_DIS_CASE)
- SSSE3_UNOP_INSTRUCTION_LIST(SSE34_DIS_CASE)
- SSE4_INSTRUCTION_LIST(SSE34_DIS_CASE)
- SSE4_UNOP_INSTRUCTION_LIST(SSE34_DIS_CASE)
- SSE4_2_INSTRUCTION_LIST(SSE34_DIS_CASE)
-#undef SSE34_DIS_CASE
- default:
- UnimplementedInstruction();
+ if (opcode == 0xC1) {
+ current += PrintOperands("xadd", OPER_REG_OP_ORDER, current);
+ } else if (opcode == 0x1F) {
+ current++;
+ if (rm == 4) { // SIB byte present.
+ current++;
}
- } else if (opcode == 0x3A) {
- byte third_byte = *current;
- current = data + 3;
- if (third_byte == 0x17) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("extractps "); // reg/m32, xmm, imm8
- current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
- current += 1;
- } else if (third_byte == 0x08) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("roundps %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", (*current) & 3);
- current += 1;
- } else if (third_byte == 0x09) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("roundpd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", (*current) & 3);
- current += 1;
- } else if (third_byte == 0x0A) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("roundss %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", (*current) & 3);
- current += 1;
- } else if (third_byte == 0x0B) {
- get_modrm(*current, &mod, &regop, &rm);
- // roundsd xmm, xmm/m64, imm8
- AppendToBuffer("roundsd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", (*current) & 3);
- current += 1;
- } else if (third_byte == 0x0E) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pblendw %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", *current);
- current += 1;
- } else if (third_byte == 0x0F) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("palignr %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", (*current));
- current += 1;
- } else if (third_byte == 0x14) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pextrb "); // reg/m32, xmm, imm8
- current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
- current += 1;
- } else if (third_byte == 0x15) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pextrw "); // reg/m32, xmm, imm8
- current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 7);
- current += 1;
- } else if (third_byte == 0x16) {
- get_modrm(*current, &mod, &regop, &rm);
- // reg/m32/reg/m64, xmm, imm8
- AppendToBuffer("pextr%c ", rex_w() ? 'q' : 'd');
- current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
- current += 1;
- } else if (third_byte == 0x20) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pinsrb "); // xmm, reg/m32, imm8
- AppendToBuffer(" %s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- AppendToBuffer(",%d", (*current) & 3);
- current += 1;
- } else if (third_byte == 0x21) {
- get_modrm(*current, &mod, &regop, &rm);
- // insertps xmm, xmm/m32, imm8
- AppendToBuffer("insertps %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", (*current));
- current += 1;
- } else if (third_byte == 0x22) {
- get_modrm(*current, &mod, &regop, &rm);
- // xmm, reg/m32/reg/m64, imm8
- AppendToBuffer("pinsr%c ", rex_w() ? 'q' : 'd');
- AppendToBuffer(" %s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- AppendToBuffer(",%d", (*current) & 3);
+ if (mod == 1) { // Byte displacement.
current += 1;
+ } else if (mod == 2) { // 32-bit displacement.
+ current += 4;
+ } // else no immediate displacement.
+ AppendToBuffer("nop");
+ } else if (opcode == 0x10) {
+ current += PrintOperands("movupd", XMMREG_XMMOPER_OP_ORDER, current);
+ } else if (opcode == 0x11) {
+ current += PrintOperands("movupd", XMMOPER_XMMREG_OP_ORDER, current);
+ } else if (opcode == 0x28) {
+ current += PrintOperands("movapd", XMMREG_XMMOPER_OP_ORDER, current);
+ } else if (opcode == 0x29) {
+ current += PrintOperands("movapd", XMMOPER_XMMREG_OP_ORDER, current);
+ } else if (opcode == 0x6E) {
+ current += PrintOperands(rex_w() ? "movq" : "movd",
+ XMMREG_XMMOPER_OP_ORDER, current);
+ } else if (opcode == 0x6F) {
+ current += PrintOperands("movdqa", XMMREG_XMMOPER_OP_ORDER, current);
+ } else if (opcode == 0x7E) {
+ current += PrintOperands(rex_w() ? "movq" : "movd",
+ XMMOPER_XMMREG_OP_ORDER, current);
+ } else if (opcode == 0x7F) {
+ current += PrintOperands("movdqa", XMMOPER_XMMREG_OP_ORDER, current);
+ } else if (opcode == 0xD6) {
+ current += PrintOperands("movq", XMMOPER_XMMREG_OP_ORDER, current);
+ } else if (opcode == 0x50) {
+ AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x70) {
+ current += PrintOperands("pshufd", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", *current++);
+ } else if (opcode == 0x71) {
+ current += 1;
+ AppendToBuffer("ps%sw %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
+ *current & 0x7F);
+ current += 1;
+ } else if (opcode == 0x72) {
+ current += 1;
+ AppendToBuffer("ps%sd %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
+ *current & 0x7F);
+ current += 1;
+ } else if (opcode == 0x73) {
+ current += 1;
+ AppendToBuffer("ps%sq %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
+ *current & 0x7F);
+ current += 1;
+ } else if (opcode == 0xB1) {
+ current += PrintOperands("cmpxchg", OPER_REG_OP_ORDER, current);
+ } else if (opcode == 0xC4) {
+ current += PrintOperands("pinsrw", XMMREG_OPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", (*current++) & 7);
+ } else {
+ const char* mnemonic;
+ if (opcode == 0x51) {
+ mnemonic = "sqrtpd";
+ } else if (opcode == 0x54) {
+ mnemonic = "andpd";
+ } else if (opcode == 0x55) {
+ mnemonic = "andnpd";
+ } else if (opcode == 0x56) {
+ mnemonic = "orpd";
+ } else if (opcode == 0x57) {
+ mnemonic = "xorpd";
+ } else if (opcode == 0x58) {
+ mnemonic = "addpd";
+ } else if (opcode == 0x59) {
+ mnemonic = "mulpd";
+ } else if (opcode == 0x5B) {
+ mnemonic = "cvtps2dq";
+ } else if (opcode == 0x5C) {
+ mnemonic = "subpd";
+ } else if (opcode == 0x5D) {
+ mnemonic = "minpd";
+ } else if (opcode == 0x5E) {
+ mnemonic = "divpd";
+ } else if (opcode == 0x5F) {
+ mnemonic = "maxpd";
+ } else if (opcode == 0x60) {
+ mnemonic = "punpcklbw";
+ } else if (opcode == 0x61) {
+ mnemonic = "punpcklwd";
+ } else if (opcode == 0x62) {
+ mnemonic = "punpckldq";
+ } else if (opcode == 0x63) {
+ mnemonic = "packsswb";
+ } else if (opcode == 0x64) {
+ mnemonic = "pcmpgtb";
+ } else if (opcode == 0x65) {
+ mnemonic = "pcmpgtw";
+ } else if (opcode == 0x66) {
+ mnemonic = "pcmpgtd";
+ } else if (opcode == 0x67) {
+ mnemonic = "packuswb";
+ } else if (opcode == 0x68) {
+ mnemonic = "punpckhbw";
+ } else if (opcode == 0x69) {
+ mnemonic = "punpckhwd";
+ } else if (opcode == 0x6A) {
+ mnemonic = "punpckhdq";
+ } else if (opcode == 0x6B) {
+ mnemonic = "packssdw";
+ } else if (opcode == 0x6C) {
+ mnemonic = "punpcklqdq";
+ } else if (opcode == 0x6D) {
+ mnemonic = "punpckhqdq";
+ } else if (opcode == 0x2E) {
+ mnemonic = "ucomisd";
+ } else if (opcode == 0x2F) {
+ mnemonic = "comisd";
+ } else if (opcode == 0x74) {
+ mnemonic = "pcmpeqb";
+ } else if (opcode == 0x75) {
+ mnemonic = "pcmpeqw";
+ } else if (opcode == 0x76) {
+ mnemonic = "pcmpeqd";
+ } else if (opcode == 0xC2) {
+ mnemonic = "cmppd";
+ } else if (opcode == 0xD1) {
+ mnemonic = "psrlw";
+ } else if (opcode == 0xD2) {
+ mnemonic = "psrld";
+ } else if (opcode == 0xD3) {
+ mnemonic = "psrlq";
+ } else if (opcode == 0xD4) {
+ mnemonic = "paddq";
+ } else if (opcode == 0xD5) {
+ mnemonic = "pmullw";
+ } else if (opcode == 0xD7) {
+ mnemonic = "pmovmskb";
+ } else if (opcode == 0xD8) {
+ mnemonic = "psubusb";
+ } else if (opcode == 0xD9) {
+ mnemonic = "psubusw";
+ } else if (opcode == 0xDA) {
+ mnemonic = "pminub";
+ } else if (opcode == 0xDB) {
+ mnemonic = "pand";
+ } else if (opcode == 0xDC) {
+ mnemonic = "paddusb";
+ } else if (opcode == 0xDD) {
+ mnemonic = "paddusw";
+ } else if (opcode == 0xDE) {
+ mnemonic = "pmaxub";
+ } else if (opcode == 0xE0) {
+ mnemonic = "pavgb";
+ } else if (opcode == 0xE1) {
+ mnemonic = "psraw";
+ } else if (opcode == 0xE2) {
+ mnemonic = "psrad";
+ } else if (opcode == 0xE3) {
+ mnemonic = "pavgw";
+ } else if (opcode == 0xE8) {
+ mnemonic = "psubsb";
+ } else if (opcode == 0xE9) {
+ mnemonic = "psubsw";
+ } else if (opcode == 0xEA) {
+ mnemonic = "pminsw";
+ } else if (opcode == 0xEB) {
+ mnemonic = "por";
+ } else if (opcode == 0xEC) {
+ mnemonic = "paddsb";
+ } else if (opcode == 0xED) {
+ mnemonic = "paddsw";
+ } else if (opcode == 0xEE) {
+ mnemonic = "pmaxsw";
+ } else if (opcode == 0xEF) {
+ mnemonic = "pxor";
+ } else if (opcode == 0xF1) {
+ mnemonic = "psllw";
+ } else if (opcode == 0xF2) {
+ mnemonic = "pslld";
+ } else if (opcode == 0xF3) {
+ mnemonic = "psllq";
+ } else if (opcode == 0xF4) {
+ mnemonic = "pmuludq";
+ } else if (opcode == 0xF5) {
+ mnemonic = "pmaddwd";
+ } else if (opcode == 0xF8) {
+ mnemonic = "psubb";
+ } else if (opcode == 0xF9) {
+ mnemonic = "psubw";
+ } else if (opcode == 0xFA) {
+ mnemonic = "psubd";
+ } else if (opcode == 0xFB) {
+ mnemonic = "psubq";
+ } else if (opcode == 0xFC) {
+ mnemonic = "paddb";
+ } else if (opcode == 0xFD) {
+ mnemonic = "paddw";
+ } else if (opcode == 0xFE) {
+ mnemonic = "paddd";
} else {
UnimplementedInstruction();
}
- } else if (opcode == 0xC1) {
- current += PrintOperands("xadd", OPER_REG_OP_ORDER, current);
- } else {
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x1F) {
- current++;
- if (rm == 4) { // SIB byte present.
- current++;
- }
- if (mod == 1) { // Byte displacement.
- current += 1;
- } else if (mod == 2) { // 32-bit displacement.
- current += 4;
- } // else no immediate displacement.
- AppendToBuffer("nop");
- } else if (opcode == 0x10) {
- AppendToBuffer("movupd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x11) {
- AppendToBuffer("movupd ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (opcode == 0x28) {
- AppendToBuffer("movapd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x29) {
- AppendToBuffer("movapd ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (opcode == 0x6E) {
- AppendToBuffer("mov%c %s,", rex_w() ? 'q' : 'd',
- NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- } else if (opcode == 0x6F) {
- AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x7E) {
- AppendToBuffer("mov%c ", rex_w() ? 'q' : 'd');
- current += PrintRightOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (opcode == 0x7F) {
- AppendToBuffer("movdqa ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (opcode == 0xD6) {
- AppendToBuffer("movq ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (opcode == 0x50) {
- AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x70) {
- AppendToBuffer("pshufd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", *current);
- current += 1;
- } else if (opcode == 0x71) {
- current += 1;
- AppendToBuffer("ps%sw %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
- *current & 0x7F);
- current += 1;
- } else if (opcode == 0x72) {
- current += 1;
- AppendToBuffer("ps%sd %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
- *current & 0x7F);
- current += 1;
- } else if (opcode == 0x73) {
- current += 1;
- AppendToBuffer("ps%sq %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
- *current & 0x7F);
- current += 1;
- } else if (opcode == 0xB1) {
- current += PrintOperands("cmpxchg", OPER_REG_OP_ORDER, current);
- } else if (opcode == 0xC4) {
- AppendToBuffer("pinsrw %s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- AppendToBuffer(",0x%x", (*current) & 7);
+ // Not every opcode here has an XMM register as the dst operand.
+ const char* regop_reg =
+ opcode == 0xD7 ? NameOfCPURegister(regop) : NameOfXMMRegister(regop);
+ AppendToBuffer("%s %s,", mnemonic, regop_reg);
+ current += PrintRightXMMOperand(current);
+ if (opcode == 0xC2) {
+ const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
+ "neq", "nlt", "nle", "ord"};
+ AppendToBuffer(", (%s)", pseudo_op[*current]);
current += 1;
- } else {
- const char* mnemonic;
- if (opcode == 0x51) {
- mnemonic = "sqrtpd";
- } else if (opcode == 0x54) {
- mnemonic = "andpd";
- } else if (opcode == 0x55) {
- mnemonic = "andnpd";
- } else if (opcode == 0x56) {
- mnemonic = "orpd";
- } else if (opcode == 0x57) {
- mnemonic = "xorpd";
- } else if (opcode == 0x58) {
- mnemonic = "addpd";
- } else if (opcode == 0x59) {
- mnemonic = "mulpd";
- } else if (opcode == 0x5B) {
- mnemonic = "cvtps2dq";
- } else if (opcode == 0x5C) {
- mnemonic = "subpd";
- } else if (opcode == 0x5D) {
- mnemonic = "minpd";
- } else if (opcode == 0x5E) {
- mnemonic = "divpd";
- } else if (opcode == 0x5F) {
- mnemonic = "maxpd";
- } else if (opcode == 0x60) {
- mnemonic = "punpcklbw";
- } else if (opcode == 0x61) {
- mnemonic = "punpcklwd";
- } else if (opcode == 0x62) {
- mnemonic = "punpckldq";
- } else if (opcode == 0x63) {
- mnemonic = "packsswb";
- } else if (opcode == 0x64) {
- mnemonic = "pcmpgtb";
- } else if (opcode == 0x65) {
- mnemonic = "pcmpgtw";
- } else if (opcode == 0x66) {
- mnemonic = "pcmpgtd";
- } else if (opcode == 0x67) {
- mnemonic = "packuswb";
- } else if (opcode == 0x68) {
- mnemonic = "punpckhbw";
- } else if (opcode == 0x69) {
- mnemonic = "punpckhwd";
- } else if (opcode == 0x6A) {
- mnemonic = "punpckhdq";
- } else if (opcode == 0x6B) {
- mnemonic = "packssdw";
- } else if (opcode == 0x6C) {
- mnemonic = "punpcklqdq";
- } else if (opcode == 0x6D) {
- mnemonic = "punpckhqdq";
- } else if (opcode == 0x2E) {
- mnemonic = "ucomisd";
- } else if (opcode == 0x2F) {
- mnemonic = "comisd";
- } else if (opcode == 0x74) {
- mnemonic = "pcmpeqb";
- } else if (opcode == 0x75) {
- mnemonic = "pcmpeqw";
- } else if (opcode == 0x76) {
- mnemonic = "pcmpeqd";
- } else if (opcode == 0xC2) {
- mnemonic = "cmppd";
- } else if (opcode == 0xD1) {
- mnemonic = "psrlw";
- } else if (opcode == 0xD2) {
- mnemonic = "psrld";
- } else if (opcode == 0xD3) {
- mnemonic = "psrlq";
- } else if (opcode == 0xD4) {
- mnemonic = "paddq";
- } else if (opcode == 0xD5) {
- mnemonic = "pmullw";
- } else if (opcode == 0xD7) {
- mnemonic = "pmovmskb";
- } else if (opcode == 0xD8) {
- mnemonic = "psubusb";
- } else if (opcode == 0xD9) {
- mnemonic = "psubusw";
- } else if (opcode == 0xDA) {
- mnemonic = "pminub";
- } else if (opcode == 0xDB) {
- mnemonic = "pand";
- } else if (opcode == 0xDC) {
- mnemonic = "paddusb";
- } else if (opcode == 0xDD) {
- mnemonic = "paddusw";
- } else if (opcode == 0xDE) {
- mnemonic = "pmaxub";
- } else if (opcode == 0xE0) {
- mnemonic = "pavgb";
- } else if (opcode == 0xE1) {
- mnemonic = "psraw";
- } else if (opcode == 0xE2) {
- mnemonic = "psrad";
- } else if (opcode == 0xE3) {
- mnemonic = "pavgw";
- } else if (opcode == 0xE8) {
- mnemonic = "psubsb";
- } else if (opcode == 0xE9) {
- mnemonic = "psubsw";
- } else if (opcode == 0xEA) {
- mnemonic = "pminsw";
- } else if (opcode == 0xEB) {
- mnemonic = "por";
- } else if (opcode == 0xEC) {
- mnemonic = "paddsb";
- } else if (opcode == 0xED) {
- mnemonic = "paddsw";
- } else if (opcode == 0xEE) {
- mnemonic = "pmaxsw";
- } else if (opcode == 0xEF) {
- mnemonic = "pxor";
- } else if (opcode == 0xF1) {
- mnemonic = "psllw";
- } else if (opcode == 0xF2) {
- mnemonic = "pslld";
- } else if (opcode == 0xF3) {
- mnemonic = "psllq";
- } else if (opcode == 0xF4) {
- mnemonic = "pmuludq";
- } else if (opcode == 0xF5) {
- mnemonic = "pmaddwd";
- } else if (opcode == 0xF8) {
- mnemonic = "psubb";
- } else if (opcode == 0xF9) {
- mnemonic = "psubw";
- } else if (opcode == 0xFA) {
- mnemonic = "psubd";
- } else if (opcode == 0xFB) {
- mnemonic = "psubq";
- } else if (opcode == 0xFC) {
- mnemonic = "paddb";
- } else if (opcode == 0xFD) {
- mnemonic = "paddw";
- } else if (opcode == 0xFE) {
- mnemonic = "paddd";
- } else {
- UnimplementedInstruction();
- }
- // Not every opcode here has an XMM register as the dst operand.
- const char* regop_reg = opcode == 0xD7 ? NameOfCPURegister(regop)
- : NameOfXMMRegister(regop);
- AppendToBuffer("%s %s,", mnemonic, regop_reg);
- current += PrintRightXMMOperand(current);
- if (opcode == 0xC2) {
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
- "neq", "nlt", "nle", "ord"};
- AppendToBuffer(", (%s)", pseudo_op[*current]);
- current += 1;
- }
}
}
} else if (group_1_prefix_ == 0xF2) {
// Beginning of instructions with prefix 0xF2.
-
- if (opcode == 0x11 || opcode == 0x10) {
+ if (opcode == 0x10) {
// MOVSD: Move scalar double-precision fp to/from/between XMM registers.
- AppendToBuffer("movsd ");
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x11) {
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- }
+ current += PrintOperands("movsd", XMMREG_XMMOPER_OP_ORDER, current);
+ } else if (opcode == 0x11) {
+ current += PrintOperands("movsd", XMMOPER_XMMREG_OP_ORDER, current);
} else if (opcode == 0x12) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movddup %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintOperands("movddup", XMMREG_XMMOPER_OP_ORDER, current);
} else if (opcode == 0x2A) {
// CVTSI2SD: integer to XMM double conversion.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
+ current += PrintOperands(mnemonic, XMMREG_OPER_OP_ORDER, current);
} else if (opcode == 0x2C) {
// CVTTSD2SI:
// Convert with truncation scalar double-precision FP to integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("cvttsd2si%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x2D) {
// CVTSD2SI: Convert scalar double-precision FP to integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("cvtsd2si%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x5B) {
// CVTTPS2DQ: Convert packed single-precision FP values to packed signed
// doubleword integer values
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("cvttps2dq%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
// XMM arithmetic. Mnemonic was retrieved at the start of this function.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintOperands(mnemonic, XMMREG_XMMOPER_OP_ORDER, current);
} else if (opcode == 0x70) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pshuflw %s, ", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(", %d", (*current) & 7);
- current += 1;
+ current += PrintOperands("pshuflw", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",%d", (*current++) & 7);
} else if (opcode == 0xC2) {
// Intel manual 2A, Table 3-18.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
const char* const pseudo_op[] = {"cmpeqsd", "cmpltsd", "cmplesd",
"cmpunordsd", "cmpneqsd", "cmpnltsd",
"cmpnlesd", "cmpordsd"};
@@ -2238,97 +2140,54 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
NameOfXMMRegister(regop), NameOfXMMRegister(rm));
current += 2;
} else if (opcode == 0xF0) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("lddqu %s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
+ current += PrintOperands("lddqu", XMMREG_OPER_OP_ORDER, current);
} else if (opcode == 0x7C) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("haddps %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintOperands("haddps", XMMREG_XMMOPER_OP_ORDER, current);
} else {
UnimplementedInstruction();
}
} else if (group_1_prefix_ == 0xF3) {
// Instructions with prefix 0xF3.
- if (opcode == 0x11 || opcode == 0x10) {
+ if (opcode == 0x10) {
// MOVSS: Move scalar single-precision fp to/from/between XMM registers.
- AppendToBuffer("movss ");
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x11) {
- current += PrintRightOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- }
+ current += PrintOperands("movss", XMMREG_OPER_OP_ORDER, current);
+ } else if (opcode == 0x11) {
+ current += PrintOperands("movss", OPER_XMMREG_OP_ORDER, current);
} else if (opcode == 0x2A) {
// CVTSI2SS: integer to XMM single conversion.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
+ current += PrintOperands(mnemonic, XMMREG_OPER_OP_ORDER, current);
} else if (opcode == 0x2C) {
// CVTTSS2SI:
// Convert with truncation scalar single-precision FP to dword integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("cvttss2si%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x70) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pshufhw %s, ", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(", %d", (*current) & 7);
- current += 1;
+ current += PrintOperands("pshufhw", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(", %d", (*current++) & 7);
} else if (opcode == 0x6F) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintOperands("movdqu", XMMREG_XMMOPER_OP_ORDER, current);
} else if (opcode == 0x7E) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movq %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintOperands("movq", XMMREG_XMMOPER_OP_ORDER, current);
} else if (opcode == 0x7F) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movdqu ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintOperands("movdqu", XMMOPER_XMMREG_OP_ORDER, current);
} else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
// XMM arithmetic. Mnemonic was retrieved at the start of this function.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintOperands(mnemonic, XMMREG_XMMOPER_OP_ORDER, current);
} else if (opcode == 0xB8) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("popcnt%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
} else if (opcode == 0xBC) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("tzcnt%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
} else if (opcode == 0xBD) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("lzcnt%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
} else if (opcode == 0xC2) {
// Intel manual 2A, Table 3-18.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
const char* const pseudo_op[] = {"cmpeqss", "cmpltss", "cmpless",
"cmpunordss", "cmpneqss", "cmpnltss",
"cmpnless", "cmpordss"};
@@ -2338,29 +2197,37 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else {
UnimplementedInstruction();
}
- } else if (opcode == 0x10 || opcode == 0x11) {
+ } else if (opcode == 0x10) {
// movups xmm, xmm/m128
+ current += PrintOperands("movups", XMMREG_XMMOPER_OP_ORDER, current);
+ } else if (opcode == 0x11) {
// movups xmm/m128, xmm
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movups ");
- if (opcode == 0x11) {
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- }
+ current += PrintOperands("movups", XMMOPER_XMMREG_OP_ORDER, current);
+ } else if (opcode == 0x12) {
+ // movlps xmm1, m64
+ current += PrintOperands("movlps", XMMREG_OPER_OP_ORDER, current);
+ } else if (opcode == 0x13) {
+ // movlps m64, xmm1
+ AppendToBuffer("movlps ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x16) {
// movlhps xmm1, xmm2
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movlhps %s,", NameOfXMMRegister(regop));
+ // movhps xmm1, m64
+ if (mod == 0b11) {
+ AppendToBuffer("movlhps ");
+ } else {
+ AppendToBuffer("movhps ");
+ }
+ AppendToBuffer("%s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x17) {
+ // movhps m64, xmm1
+ AppendToBuffer("movhps ");
current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x1F) {
// NOP
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
current++;
if (rm == 4) { // SIB byte present.
current++;
@@ -2374,22 +2241,16 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x28) {
// movaps xmm, xmm/m128
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("movaps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x29) {
// movaps xmm/m128, xmm
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("movaps ");
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x2E) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("ucomiss %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0xA2) {
@@ -2408,8 +2269,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
"orps", "xorps", "addps", "mulps", "cvtps2pd",
"cvtdq2ps", "subps", "minps", "divps", "maxps",
};
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s %s,", pseudo_op[opcode - 0x51],
NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -2421,8 +2280,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintOperands("xadd", OPER_REG_OP_ORDER, current);
} else if (opcode == 0xC2) {
// cmpps xmm, xmm/m128, imm8
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
"neq", "nlt", "nle", "ord"};
AppendToBuffer("cmpps %s, ", NameOfXMMRegister(regop));
@@ -2431,8 +2288,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 1;
} else if (opcode == 0xC6) {
// shufps xmm, xmm/m128, imm8
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("shufps %s, ", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
AppendToBuffer(", %d", (*current) & 3);
@@ -2443,8 +2298,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer("bswap%c %s", operand_size_code(), NameOfCPURegister(reg));
} else if (opcode == 0x50) {
// movmskps reg, xmm
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("movmskps %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if ((opcode & 0xF0) == 0x80) {
@@ -2463,8 +2316,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// BT (bit test), SHLD, BTS (bit test and set),
// SHRD (double-precision shift)
AppendToBuffer("%s ", mnemonic);
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
current += PrintRightOperand(current);
if (opcode == 0xAB) {
AppendToBuffer(",%s", NameOfCPURegister(regop));
@@ -2473,8 +2324,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
}
} else if (opcode == 0xBA) {
// BTS / BTR (bit test and set/reset) with immediate
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
mnemonic = regop == 5 ? "bts" : regop == 6 ? "btr" : "?";
AppendToBuffer("%s ", mnemonic);
current += PrintRightOperand(current);
@@ -2482,8 +2331,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0xB8 || opcode == 0xBC || opcode == 0xBD) {
// POPCNT, CTZ, CLZ.
AppendToBuffer("%s%c ", mnemonic, operand_size_code());
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
} else if (opcode == 0x0B) {
@@ -2506,6 +2353,102 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
return static_cast<int>(current - data);
}
+// Handle all three-byte opcodes, which start with 0x0F38 or 0x0F3A.
+// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix, but we
+// only have instructions prefixed with 0x66 for now.
+int DisassemblerX64::ThreeByteOpcodeInstruction(byte* data) {
+ DCHECK_EQ(0x0F, *data);
+ // Only support 3-byte opcodes prefixed with 0x66 for now.
+ DCHECK_EQ(0x66, operand_size_);
+ byte second_byte = *(data + 1);
+ byte third_byte = *(data + 2);
+ byte* current = data + 3;
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ if (second_byte == 0x38) {
+ switch (third_byte) {
+ case 0x10: {
+ AppendToBuffer("pblendvb %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",<xmm0>");
+ break;
+ }
+ case 0x14: {
+ AppendToBuffer("blendvps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",<xmm0>");
+ break;
+ }
+ case 0x15: {
+ current += PrintOperands("blendvpd", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",<xmm0>");
+ break;
+ }
+#define SSE34_DIS_CASE(instruction, notUsed1, notUsed2, notUsed3, opcode) \
+ case 0x##opcode: { \
+ current += PrintOperands(#instruction, XMMREG_XMMOPER_OP_ORDER, current); \
+ break; \
+ }
+
+ SSSE3_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSSE3_UNOP_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSE4_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSE4_UNOP_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSE4_2_INSTRUCTION_LIST(SSE34_DIS_CASE)
+#undef SSE34_DIS_CASE
+ default:
+ UnimplementedInstruction();
+ }
+ } else {
+ DCHECK_EQ(0x3A, second_byte);
+ if (third_byte == 0x17) {
+ current += PrintOperands("extractps", OPER_XMMREG_OP_ORDER, current);
+ AppendToBuffer(",%d", (*current++) & 3);
+ } else if (third_byte == 0x08) {
+ current += PrintOperands("roundps", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", (*current++) & 3);
+ } else if (third_byte == 0x09) {
+ current += PrintOperands("roundpd", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", (*current++) & 3);
+ } else if (third_byte == 0x0A) {
+ current += PrintOperands("roundss", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", (*current++) & 3);
+ } else if (third_byte == 0x0B) {
+ current += PrintOperands("roundsd", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", (*current++) & 3);
+ } else if (third_byte == 0x0E) {
+ current += PrintOperands("pblendw", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", *current++);
+ } else if (third_byte == 0x0F) {
+ current += PrintOperands("palignr", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", *current++);
+ } else if (third_byte == 0x14) {
+ current += PrintOperands("pextrb", OPER_XMMREG_OP_ORDER, current);
+ AppendToBuffer(",%d", (*current++) & 0xf);
+ } else if (third_byte == 0x15) {
+ current += PrintOperands("pextrw", OPER_XMMREG_OP_ORDER, current);
+ AppendToBuffer(",%d", (*current++) & 7);
+ } else if (third_byte == 0x16) {
+ const char* mnem = rex_w() ? "pextrq" : "pextrd";
+ current += PrintOperands(mnem, OPER_XMMREG_OP_ORDER, current);
+ AppendToBuffer(",%d", (*current++) & 3);
+ } else if (third_byte == 0x20) {
+ current += PrintOperands("pinsrb", XMMREG_OPER_OP_ORDER, current);
+ AppendToBuffer(",%d", (*current++) & 3);
+ } else if (third_byte == 0x21) {
+ current += PrintOperands("insertps", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", *current++);
+ } else if (third_byte == 0x22) {
+ const char* mnem = rex_w() ? "pinsrq" : "pinsrd";
+ current += PrintOperands(mnem, XMMREG_OPER_OP_ORDER, current);
+ AppendToBuffer(",%d", (*current++) & 3);
+ } else {
+ UnimplementedInstruction();
+ }
+ }
+ return static_cast<int>(current - data);
+}
+
// Mnemonics for two-byte opcode instructions starting with 0x0F.
// The argument is the second byte of the two-byte opcode.
// Returns nullptr if the instruction is not handled here.
@@ -2730,7 +2673,12 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0x0F:
- data += TwoByteOpcodeInstruction(data);
+ // Check for three-byte opcodes, 0x0F38 or 0x0F3A.
+ if (*(data + 1) == 0x38 || *(data + 1) == 0x3A) {
+ data += ThreeByteOpcodeInstruction(data);
+ } else {
+ data += TwoByteOpcodeInstruction(data);
+ }
break;
case 0x8F: {
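As context for the dispatch change above: x86-64 three-byte opcodes are the escape byte 0x0F followed by 0x38 or 0x3A and then the actual opcode, so the decoder only has to peek one byte past 0x0F to choose between the two-byte and three-byte paths. A minimal standalone sketch of that peek-and-dispatch shape, with decode_0f, decode_two_byte, and decode_three_byte as hypothetical stand-ins (not V8 functions), and lengths that cover only the simplest no-displacement/no-immediate form:

#include <cstdint>
#include <cstdio>

using byte = uint8_t;

// Three-byte opcodes: 0x0F 0x38/0x3A <opcode> <modrm> ...
int decode_three_byte(const byte* data) {
  std::printf("three-byte opcode 0x%02X\n", static_cast<unsigned>(data[2]));
  return 4;  // escape + 0x38/0x3A + opcode + modrm (simplest form)
}

// Two-byte opcodes: 0x0F <opcode> <modrm> ...
int decode_two_byte(const byte* data) {
  std::printf("two-byte opcode 0x%02X\n", static_cast<unsigned>(data[1]));
  return 3;  // escape + opcode + modrm (simplest form)
}

// Peek at the byte after 0x0F to pick a decoder, mirroring the
// InstructionDecode change above.
int decode_0f(const byte* data) {
  if (data[1] == 0x38 || data[1] == 0x3A) return decode_three_byte(data);
  return decode_two_byte(data);
}

int main() {
  const byte three[] = {0x0F, 0x38, 0x00, 0xC1};  // pshufb xmm0,xmm1 (0x66 prefix elided)
  const byte two[] = {0x0F, 0x28, 0xC1};          // movaps xmm0,xmm1
  decode_0f(three);
  decode_0f(two);
  return 0;
}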
diff --git a/deps/v8/src/diagnostics/x64/unwinder-x64.cc b/deps/v8/src/diagnostics/x64/unwinder-x64.cc
new file mode 100644
index 0000000000..5a92512a17
--- /dev/null
+++ b/deps/v8/src/diagnostics/x64/unwinder-x64.cc
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/unwinder.h"
+
+namespace v8 {
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+
+} // namespace v8
diff --git a/deps/v8/src/execution/DIR_METADATA b/deps/v8/src/execution/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/execution/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+} \ No newline at end of file
diff --git a/deps/v8/src/execution/OWNERS b/deps/v8/src/execution/OWNERS
index ea38b071ed..1e89f1e750 100644
--- a/deps/v8/src/execution/OWNERS
+++ b/deps/v8/src/execution/OWNERS
@@ -9,5 +9,3 @@ szuend@chromium.org
verwaest@chromium.org
per-file futex-emulation.*=marja@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/execution/arguments.h b/deps/v8/src/execution/arguments.h
index d2798e6f76..39877cf4d2 100644
--- a/deps/v8/src/execution/arguments.h
+++ b/deps/v8/src/execution/arguments.h
@@ -62,11 +62,9 @@ class Arguments {
inline Address* address_of_arg_at(int index) const {
DCHECK_LE(static_cast<uint32_t>(index), static_cast<uint32_t>(length_));
uintptr_t offset = index * kSystemPointerSize;
-#ifdef V8_REVERSE_JSARGS
if (arguments_type == ArgumentsType::kJS) {
offset = (length_ - index - 1) * kSystemPointerSize;
}
-#endif
return reinterpret_cast<Address*>(reinterpret_cast<Address>(arguments_) -
offset);
}
@@ -77,17 +75,13 @@ class Arguments {
// Arguments on the stack are in reverse order (compared to an array).
FullObjectSlot first_slot() const {
int index = length() - 1;
-#ifdef V8_REVERSE_JSARGS
if (arguments_type == ArgumentsType::kJS) index = 0;
-#endif
return slot_at(index);
}
FullObjectSlot last_slot() const {
int index = 0;
-#ifdef V8_REVERSE_JSARGS
if (arguments_type == ArgumentsType::kJS) index = length() - 1;
-#endif
return slot_at(index);
}
diff --git a/deps/v8/src/execution/arm/frame-constants-arm.h b/deps/v8/src/execution/arm/frame-constants-arm.h
index e8bee055d2..47e901ea99 100644
--- a/deps/v8/src/execution/arm/frame-constants-arm.h
+++ b/deps/v8/src/execution/arm/frame-constants-arm.h
@@ -43,12 +43,14 @@ class EntryFrameConstants : public AllStatic {
static constexpr int kArgvOffset = +1 * kSystemPointerSize;
// These offsets refer to the immediate caller (i.e a native frame).
- static constexpr int kDirectCallerFPOffset =
+ static constexpr int kDirectCallerRRegistersOffset =
/* bad frame pointer (-1) */
kPointerSize +
/* d8...d15 */
- kNumDoubleCalleeSaved * kDoubleSize +
- /* r4...r10 (i.e callee saved without fp) */
+ kNumDoubleCalleeSaved * kDoubleSize;
+ static constexpr int kDirectCallerFPOffset =
+ kDirectCallerRRegistersOffset +
+ /* r4...r10 (i.e. callee saved without fp) */
(kNumCalleeSaved - 1) * kPointerSize;
static constexpr int kDirectCallerPCOffset =
kDirectCallerFPOffset + 1 * kSystemPointerSize;
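A worked example of the new offset split, under assumed 32-bit ARM values (kPointerSize = kSystemPointerSize = 4, kDoubleSize = 8, d8..d15 callee-saved so kNumDoubleCalleeSaved = 8, and kNumCalleeSaved = 8 so r4..r10 is kNumCalleeSaved - 1 = 7 registers); the real constants live in V8's frame and register headers and may differ:

// Assumed values for illustration only.
constexpr int kPointerSize = 4;
constexpr int kSystemPointerSize = 4;
constexpr int kDoubleSize = 8;
constexpr int kNumDoubleCalleeSaved = 8;  // d8..d15 (assumption)
constexpr int kNumCalleeSaved = 8;        // r4..r10 plus fp (assumption)

constexpr int kDirectCallerRRegistersOffset =
    /* bad frame pointer */ kPointerSize +
    /* d8..d15 */ kNumDoubleCalleeSaved * kDoubleSize;             // 4 + 64 = 68
constexpr int kDirectCallerFPOffset =
    kDirectCallerRRegistersOffset +
    /* r4..r10 */ (kNumCalleeSaved - 1) * kPointerSize;            // 68 + 28 = 96
constexpr int kDirectCallerPCOffset =
    kDirectCallerFPOffset + 1 * kSystemPointerSize;                // 96 + 4 = 100

static_assert(kDirectCallerRRegistersOffset == 68, "worked example");
static_assert(kDirectCallerFPOffset == 96, "worked example");
static_assert(kDirectCallerPCOffset == 100, "worked example");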
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index 3c6368d8f5..3df283e2fd 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -900,10 +900,25 @@ void Simulator::SetFpResult(const double& result) {
}
void Simulator::TrashCallerSaveRegisters() {
- // We don't trash the registers with the return value.
+ // Return registers.
+ registers_[0] = 0x50BAD4U;
+ registers_[1] = 0x50BAD4U;
+ // Caller-saved registers.
registers_[2] = 0x50BAD4U;
registers_[3] = 0x50BAD4U;
registers_[12] = 0x50BAD4U;
+ // This value is a NaN in both 32-bit and 64-bit FP.
+ static const uint64_t v = 0x7ff000007f801000UL;
+ // d0 - d7 are caller-saved.
+ for (int i = 0; i < 8; i++) {
+ set_d_register(i, &v);
+ }
+ if (DoubleRegister::SupportedRegisterCount() > 16) {
+ // d16 - d31 (if supported) are caller-saved.
+ for (int i = 16; i < 32; i++) {
+ set_d_register(i, &v);
+ }
+ }
}
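The bit pattern written into the d-registers above can be verified independently; a small standalone check (not part of the patch) that 0x7ff000007f801000 is a NaN whether it is read back as one 64-bit double or as either 32-bit half:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  const uint64_t v = 0x7ff000007f801000ULL;
  double d;
  std::memcpy(&d, &v, sizeof(d));
  assert(std::isnan(d));  // as one 64-bit double

  const uint32_t lo_bits = static_cast<uint32_t>(v);        // 0x7f801000
  const uint32_t hi_bits = static_cast<uint32_t>(v >> 32);  // 0x7ff00000
  float lo, hi;
  std::memcpy(&lo, &lo_bits, sizeof(lo));
  std::memcpy(&hi, &hi_bits, sizeof(hi));
  assert(std::isnan(lo) && std::isnan(hi));  // and as either 32-bit float half
  return 0;
}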
int Simulator::ReadW(int32_t addr) {
@@ -1673,6 +1688,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeCompareCall target =
reinterpret_cast<SimulatorRuntimeCompareCall>(external);
iresult = target(dval0, dval1);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
set_register(r0, static_cast<int32_t>(iresult));
set_register(r1, static_cast<int32_t>(iresult >> 32));
break;
@@ -1681,6 +1699,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeFPFPCall target =
reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
dresult = target(dval0, dval1);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
SetFpResult(dresult);
break;
}
@@ -1688,6 +1709,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
dresult = target(dval0);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
SetFpResult(dresult);
break;
}
@@ -1695,6 +1719,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeFPIntCall target =
reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
dresult = target(dval0, ival);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
SetFpResult(dresult);
break;
}
@@ -1728,6 +1755,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
CHECK(stack_aligned);
UnsafeDirectApiCall(external, arg0);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
} else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
@@ -1739,6 +1769,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
CHECK(stack_aligned);
UnsafeProfilingApiCall(external, arg0, arg1);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
} else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
@@ -1750,6 +1783,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
CHECK(stack_aligned);
UnsafeDirectGetterCall(external, arg0, arg1);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
} else if (redirection->type() ==
ExternalReference::PROFILING_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -1764,6 +1800,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
} else {
// builtin call.
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
@@ -1783,6 +1822,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
int64_t result =
UnsafeGenericFunctionCall(external, arg0, arg1, arg2, arg3, arg4,
arg5, arg6, arg7, arg8, arg9);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
if (::v8::internal::FLAG_trace_sim) {
@@ -3836,6 +3878,32 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
}
}
+// Helper functions for implementing NEON ops. Unop applies a unary op to each
+// lane. Binop applies a binary operation to matching input lanes.
+template <typename T>
+void Unop(Simulator* simulator, int Vd, int Vm, std::function<T(T)> unop) {
+ static const int kLanes = 16 / sizeof(T);
+ T src[kLanes];
+ simulator->get_neon_register(Vm, src);
+ for (int i = 0; i < kLanes; i++) {
+ src[i] = unop(src[i]);
+ }
+ simulator->set_neon_register(Vd, src);
+}
+
+template <typename T>
+void Binop(Simulator* simulator, int Vd, int Vm, int Vn,
+ std::function<T(T, T)> binop) {
+ static const int kLanes = 16 / sizeof(T);
+ T src1[kLanes], src2[kLanes];
+ simulator->get_neon_register(Vn, src1);
+ simulator->get_neon_register(Vm, src2);
+ for (int i = 0; i < kLanes; i++) {
+ src1[i] = binop(src1[i], src2[i]);
+ }
+ simulator->set_neon_register(Vd, src1);
+}
+
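The Unop/Binop helpers introduced above centralize the load-lanes / apply / store-lanes pattern that the later hunks reuse. A minimal standalone illustration of the same lane-wise shape on a plain 16-byte array (LaneWiseBinop is a hypothetical name, not the simulator API):

#include <algorithm>
#include <cstdint>
#include <functional>

// Apply binop to matching lanes of two 16-byte vectors, writing back into dst.
template <typename T>
void LaneWiseBinop(T (&dst)[16 / sizeof(T)], const T (&src)[16 / sizeof(T)],
                   std::function<T(T, T)> binop) {
  constexpr int kLanes = 16 / sizeof(T);
  for (int i = 0; i < kLanes; i++) dst[i] = binop(dst[i], src[i]);
}

int main() {
  int32_t a[4] = {1, 2, 3, 4};
  int32_t b[4] = {10, 20, 30, 40};
  LaneWiseBinop<int32_t>(a, b, std::plus<int32_t>());  // a == {11, 22, 33, 44}
  LaneWiseBinop<int32_t>(a, b,
                         [](int32_t x, int32_t y) { return std::max(x, y); });
  return 0;
}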
// Templated operations for NEON instructions.
template <typename T, typename U>
U Widen(T value) {
@@ -3857,15 +3925,6 @@ U Narrow(T value) {
return static_cast<U>(value);
}
-template <typename T>
-T Clamp(int64_t value) {
- static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
- int64_t min = static_cast<int64_t>(std::numeric_limits<T>::min());
- int64_t max = static_cast<int64_t>(std::numeric_limits<T>::max());
- int64_t clamped = std::max(min, std::min(max, value));
- return static_cast<T>(clamped);
-}
-
template <typename T, typename U>
void Widen(Simulator* simulator, int Vd, int Vm) {
static const int kLanes = 8 / sizeof(T);
@@ -3880,28 +3939,15 @@ void Widen(Simulator* simulator, int Vd, int Vm) {
template <typename T, int SIZE>
void Abs(Simulator* simulator, int Vd, int Vm) {
- static const int kElems = SIZE / sizeof(T);
- T src[kElems];
- simulator->get_neon_register<T, SIZE>(Vm, src);
- for (int i = 0; i < kElems; i++) {
- src[i] = std::abs(src[i]);
- }
- simulator->set_neon_register<T, SIZE>(Vd, src);
+ Unop<T>(simulator, Vd, Vm, [](T x) { return std::abs(x); });
}
template <typename T, int SIZE>
void Neg(Simulator* simulator, int Vd, int Vm) {
- static const int kElems = SIZE / sizeof(T);
- T src[kElems];
- simulator->get_neon_register<T, SIZE>(Vm, src);
- for (int i = 0; i < kElems; i++) {
- if (src[i] != std::numeric_limits<T>::min()) {
- src[i] = -src[i];
- } else {
- // The respective minimum (negative) value maps to itself.
- }
- }
- simulator->set_neon_register<T, SIZE>(Vd, src);
+ Unop<T>(simulator, Vd, Vm, [](T x) {
+ // The respective minimum (negative) value maps to itself.
+ return x == std::numeric_limits<T>::min() ? x : -x;
+ });
}
template <typename T, typename U>
@@ -3911,7 +3957,7 @@ void SaturatingNarrow(Simulator* simulator, int Vd, int Vm) {
U dst[kLanes];
simulator->get_neon_register(Vm, src);
for (int i = 0; i < kLanes; i++) {
- dst[i] = Narrow<T, U>(Clamp<U>(src[i]));
+ dst[i] = Narrow<T, U>(Saturate<U>(src[i]));
}
simulator->set_neon_register<U, kDoubleSize>(Vd, dst);
}
@@ -3923,33 +3969,19 @@ void SaturatingUnsignedNarrow(Simulator* simulator, int Vd, int Vm) {
U dst[kLanes];
simulator->get_neon_register(Vm, src);
for (int i = 0; i < kLanes; i++) {
- dst[i] = Clamp<U>(src[i]);
+ dst[i] = Saturate<U>(src[i]);
}
simulator->set_neon_register<U, kDoubleSize>(Vd, dst);
}
template <typename T>
-void AddSaturate(Simulator* simulator, int Vd, int Vm, int Vn) {
- static const int kLanes = 16 / sizeof(T);
- T src1[kLanes], src2[kLanes];
- simulator->get_neon_register(Vn, src1);
- simulator->get_neon_register(Vm, src2);
- for (int i = 0; i < kLanes; i++) {
- src1[i] = Clamp<T>(Widen<T, int64_t>(src1[i]) + Widen<T, int64_t>(src2[i]));
- }
- simulator->set_neon_register(Vd, src1);
+void AddSat(Simulator* simulator, int Vd, int Vm, int Vn) {
+ Binop<T>(simulator, Vd, Vm, Vn, SaturateAdd<T>);
}
template <typename T>
-void SubSaturate(Simulator* simulator, int Vd, int Vm, int Vn) {
- static const int kLanes = 16 / sizeof(T);
- T src1[kLanes], src2[kLanes];
- simulator->get_neon_register(Vn, src1);
- simulator->get_neon_register(Vm, src2);
- for (int i = 0; i < kLanes; i++) {
- src1[i] = SaturateSub<T>(src1[i], src2[i]);
- }
- simulator->set_neon_register(Vd, src1);
+void SubSat(Simulator* simulator, int Vd, int Vm, int Vn) {
+ Binop<T>(simulator, Vd, Vm, Vn, SaturateSub<T>);
}
template <typename T, int SIZE>
@@ -4002,38 +4034,18 @@ void Transpose(Simulator* simulator, int Vd, int Vm) {
template <typename T, int SIZE>
void Test(Simulator* simulator, int Vd, int Vm, int Vn) {
- static const int kElems = SIZE / sizeof(T);
- T src1[kElems], src2[kElems];
- simulator->get_neon_register<T, SIZE>(Vn, src1);
- simulator->get_neon_register<T, SIZE>(Vm, src2);
- for (int i = 0; i < kElems; i++) {
- src1[i] = (src1[i] & src2[i]) != 0 ? -1 : 0;
- }
- simulator->set_neon_register<T, SIZE>(Vd, src1);
+ auto test = [](T x, T y) { return (x & y) ? -1 : 0; };
+ Binop<T>(simulator, Vd, Vm, Vn, test);
}
template <typename T, int SIZE>
void Add(Simulator* simulator, int Vd, int Vm, int Vn) {
- static const int kElems = SIZE / sizeof(T);
- T src1[kElems], src2[kElems];
- simulator->get_neon_register<T, SIZE>(Vn, src1);
- simulator->get_neon_register<T, SIZE>(Vm, src2);
- for (int i = 0; i < kElems; i++) {
- src1[i] += src2[i];
- }
- simulator->set_neon_register<T, SIZE>(Vd, src1);
+ Binop<T>(simulator, Vd, Vm, Vn, std::plus<T>());
}
template <typename T, int SIZE>
void Sub(Simulator* simulator, int Vd, int Vm, int Vn) {
- static const int kElems = SIZE / sizeof(T);
- T src1[kElems], src2[kElems];
- simulator->get_neon_register<T, SIZE>(Vn, src1);
- simulator->get_neon_register<T, SIZE>(Vm, src2);
- for (int i = 0; i < kElems; i++) {
- src1[i] -= src2[i];
- }
- simulator->set_neon_register<T, SIZE>(Vd, src1);
+ Binop<T>(simulator, Vd, Vm, Vn, std::minus<T>());
}
namespace {
@@ -4048,7 +4060,9 @@ uint16_t Multiply(uint16_t a, uint16_t b) {
void VmovImmediate(Simulator* simulator, Instruction* instr) {
byte cmode = instr->Bits(11, 8);
- int vd = instr->VFPDRegValue(kSimd128Precision);
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ int q = instr->Bit(6);
+ int regs = q ? 2 : 1;
uint8_t imm = instr->Bit(24) << 7; // i
imm |= instr->Bits(18, 16) << 4; // imm3
imm |= instr->Bits(3, 0); // imm4
@@ -4056,14 +4070,20 @@ void VmovImmediate(Simulator* simulator, Instruction* instr) {
case 0: {
// Set the LSB of each 64-bit halves.
uint64_t imm64 = imm;
- simulator->set_neon_register(vd, {imm64, imm64});
+ for (int r = 0; r < regs; r++) {
+ simulator->set_d_register(vd + r, &imm64);
+ }
break;
}
case 0xe: {
uint8_t imms[kSimd128Size];
// Set all bytes of register.
std::fill_n(imms, kSimd128Size, imm);
- simulator->set_neon_register(vd, imms);
+ uint64_t imm64;
+ memcpy(&imm64, imms, 8);
+ for (int r = 0; r < regs; r++) {
+ simulator->set_d_register(vd + r, &imm64);
+ }
break;
}
default: {
@@ -4087,35 +4107,19 @@ void Mul(Simulator* simulator, int Vd, int Vm, int Vn) {
template <typename T, int SIZE>
void ShiftLeft(Simulator* simulator, int Vd, int Vm, int shift) {
- static const int kElems = SIZE / sizeof(T);
- T src[kElems];
- simulator->get_neon_register<T, SIZE>(Vm, src);
- for (int i = 0; i < kElems; i++) {
- src[i] <<= shift;
- }
- simulator->set_neon_register<T, SIZE>(Vd, src);
+ Unop<T>(simulator, Vd, Vm, [shift](T x) { return x << shift; });
}
template <typename T, int SIZE>
void ShiftRight(Simulator* simulator, int Vd, int Vm, int shift) {
- static const int kElems = SIZE / sizeof(T);
- T src[kElems];
- simulator->get_neon_register<T, SIZE>(Vm, src);
- for (int i = 0; i < kElems; i++) {
- src[i] >>= shift;
- }
- simulator->set_neon_register<T, SIZE>(Vd, src);
+ Unop<T>(simulator, Vd, Vm, [shift](T x) { return x >> shift; });
}
template <typename T, int SIZE>
void ArithmeticShiftRight(Simulator* simulator, int Vd, int Vm, int shift) {
- static const int kElems = SIZE / sizeof(T);
- T src[kElems];
- simulator->get_neon_register<T, SIZE>(Vm, src);
- for (int i = 0; i < kElems; i++) {
- src[i] = ArithmeticShiftRight(src[i], shift);
- }
- simulator->set_neon_register<T, SIZE>(Vd, src);
+ auto shift_fn =
+ std::bind(ArithmeticShiftRight<T>, std::placeholders::_1, shift);
+ Unop<T>(simulator, Vd, Vm, shift_fn);
}
template <typename T, int SIZE>
@@ -4182,29 +4186,16 @@ void ShiftByRegister(Simulator* simulator, int Vd, int Vm, int Vn) {
template <typename T, int SIZE>
void CompareEqual(Simulator* simulator, int Vd, int Vm, int Vn) {
- static const int kElems = SIZE / sizeof(T);
- T src1[kElems], src2[kElems];
- simulator->get_neon_register<T, SIZE>(Vn, src1);
- simulator->get_neon_register<T, SIZE>(Vm, src2);
- for (int i = 0; i < kElems; i++) {
- src1[i] = src1[i] == src2[i] ? -1 : 0;
- }
- simulator->set_neon_register<T, SIZE>(Vd, src1);
+ Binop<T>(simulator, Vd, Vm, Vn, [](T x, T y) { return x == y ? -1 : 0; });
}
template <typename T, int SIZE>
void CompareGreater(Simulator* simulator, int Vd, int Vm, int Vn, bool ge) {
- static const int kElems = SIZE / sizeof(T);
- T src1[kElems], src2[kElems];
- simulator->get_neon_register<T, SIZE>(Vn, src1);
- simulator->get_neon_register<T, SIZE>(Vm, src2);
- for (int i = 0; i < kElems; i++) {
- if (ge)
- src1[i] = src1[i] >= src2[i] ? -1 : 0;
- else
- src1[i] = src1[i] > src2[i] ? -1 : 0;
+ if (ge) {
+ Binop<T>(simulator, Vd, Vm, Vn, [](T x, T y) { return x >= y ? -1 : 0; });
+ } else {
+ Binop<T>(simulator, Vd, Vm, Vn, [](T x, T y) { return x > y ? -1 : 0; });
}
- simulator->set_neon_register<T, SIZE>(Vd, src1);
}
float MinMax(float a, float b, bool is_min) {
@@ -4217,14 +4208,13 @@ T MinMax(T a, T b, bool is_min) {
template <typename T, int SIZE>
void MinMax(Simulator* simulator, int Vd, int Vm, int Vn, bool min) {
- static const int kElems = SIZE / sizeof(T);
- T src1[kElems], src2[kElems];
- simulator->get_neon_register<T, SIZE>(Vn, src1);
- simulator->get_neon_register<T, SIZE>(Vm, src2);
- for (int i = 0; i < kElems; i++) {
- src1[i] = MinMax(src1[i], src2[i], min);
+ if (min) {
+ Binop<T>(simulator, Vd, Vm, Vn,
+ [](auto x, auto y) { return std::min<T>(x, y); });
+ } else {
+ Binop<T>(simulator, Vd, Vm, Vn,
+ [](auto x, auto y) { return std::max<T>(x, y); });
}
- simulator->set_neon_register<T, SIZE>(Vd, src1);
}
template <typename T>
@@ -4259,14 +4249,7 @@ template <typename T, int SIZE = kSimd128Size>
void RoundingAverageUnsigned(Simulator* simulator, int Vd, int Vm, int Vn) {
static_assert(std::is_unsigned<T>::value,
"Implemented only for unsigned types.");
- static const int kElems = SIZE / sizeof(T);
- T src1[kElems], src2[kElems];
- simulator->get_neon_register<T, SIZE>(Vn, src1);
- simulator->get_neon_register<T, SIZE>(Vm, src2);
- for (int i = 0; i < kElems; i++) {
- src1[i] = base::RoundingAverageUnsigned(src1[i], src2[i]);
- }
- simulator->set_neon_register<T, SIZE>(Vd, src1);
+ Binop<T>(simulator, Vd, Vm, Vn, base::RoundingAverageUnsigned<T>);
}
template <typename NarrowType, typename WideType>
@@ -4291,338 +4274,378 @@ void MultiplyLong(Simulator* simulator, int Vd, int Vn, int Vm) {
simulator->set_neon_register<WideType>(Vd, dst);
}
-void Simulator::DecodeSpecialCondition(Instruction* instr) {
- switch (instr->SpecialValue()) {
- case 4: {
- int Vd, Vm, Vn;
- if (instr->Bit(6) == 0) {
- Vd = instr->VFPDRegValue(kDoublePrecision);
- Vm = instr->VFPMRegValue(kDoublePrecision);
- Vn = instr->VFPNRegValue(kDoublePrecision);
- } else {
- Vd = instr->VFPDRegValue(kSimd128Precision);
- Vm = instr->VFPMRegValue(kSimd128Precision);
- Vn = instr->VFPNRegValue(kSimd128Precision);
- }
- switch (instr->Bits(11, 8)) {
- case 0x0: {
- if (instr->Bit(4) == 1) {
- // vqadd.s<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- AddSaturate<int8_t>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- AddSaturate<int16_t>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- AddSaturate<int32_t>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- UNIMPLEMENTED();
+void Simulator::DecodeUnconditional(Instruction* instr) {
+ // This follows the decoding in F4.1.18 Unconditional instructions.
+ int op0 = instr->Bits(26, 25);
+ int op1 = instr->Bit(20);
+
+ // Four classes of decoding:
+ // - Miscellaneous (omitted, no instructions used in V8).
+ // - Advanced SIMD data-processing.
+ // - Memory hints and barriers.
+ // - Advanced SIMD element or structure load/store.
+ if (op0 == 0b01) {
+ DecodeAdvancedSIMDDataProcessing(instr);
+ } else if ((op0 & 0b10) == 0b10 && op1) {
+ DecodeMemoryHintsAndBarriers(instr);
+ } else if (op0 == 0b10 && !op1) {
+ DecodeAdvancedSIMDElementOrStructureLoadStore(instr);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
+ // Advanced SIMD two registers, or three registers of different lengths.
+ int op0 = instr->Bit(24);
+ int op1 = instr->Bits(21, 20);
+ int op2 = instr->Bits(11, 10);
+ int op3 = instr->Bit(6);
+ if (!op0 && op1 == 0b11) {
+ // vext.8 Qd, Qm, Qn, imm4
+ int imm4 = instr->Bits(11, 8);
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ uint8_t src1[16], src2[16], dst[16];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ int boundary = kSimd128Size - imm4;
+ int i = 0;
+ for (; i < boundary; i++) {
+ dst[i] = src1[i + imm4];
+ }
+ for (; i < 16; i++) {
+ dst[i] = src2[i - boundary];
+ }
+ set_neon_register(Vd, dst);
+ } else if (op0 && op1 == 0b11 && ((op2 >> 1) == 0)) {
+ // Advanced SIMD two registers misc
+ int size = instr->Bits(19, 18);
+ int opc1 = instr->Bits(17, 16);
+ int opc2 = instr->Bits(10, 7);
+ int q = instr->Bit(6);
+
+ if (opc1 == 0 && (opc2 >> 2) == 0) {
+ // vrev<op>.size Qd, Qm
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
+ NeonSize op =
+ static_cast<NeonSize>(static_cast<int>(Neon64) - instr->Bits(8, 7));
+ switch (op) {
+ case Neon16: {
+ DCHECK_EQ(Neon8, size);
+ uint8_t src[16];
+ get_neon_register(Vm, src);
+ for (int i = 0; i < 16; i += 2) {
+ std::swap(src[i], src[i + 1]);
}
+ set_neon_register(Vd, src);
break;
}
- case 0x1: {
- if (instr->Bits(21, 20) == 2 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vmov Qd, Qm.
- // vorr, Qd, Qm, Qn.
- uint32_t src1[4];
- get_neon_register(Vm, src1);
- if (Vm != Vn) {
- uint32_t src2[4];
- get_neon_register(Vn, src2);
- for (int i = 0; i < 4; i++) {
- src1[i] = src1[i] | src2[i];
+ case Neon32: {
+ switch (size) {
+ case Neon16: {
+ uint16_t src[8];
+ get_neon_register(Vm, src);
+ for (int i = 0; i < 8; i += 2) {
+ std::swap(src[i], src[i + 1]);
}
+ set_neon_register(Vd, src);
+ break;
}
- set_neon_register(Vd, src1);
- } else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vand Qd, Qm, Qn.
- uint32_t src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- src1[i] = src1[i] & src2[i];
- }
- set_neon_register(Vd, src1);
- } else if (instr->Bits(21, 20) == 1 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vbic Qd, Qm, Qn.
- uint32_t src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- src1[i] = src1[i] & ~src2[i];
- }
- set_neon_register(Vd, src1);
- } else {
- UNIMPLEMENTED();
- }
- break;
- }
- case 0x2: {
- if (instr->Bit(4) == 1) {
- // vqsub.s<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- SubSaturate<int8_t>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- SubSaturate<int16_t>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- SubSaturate<int32_t>(this, Vd, Vm, Vn);
- break;
- case Neon64:
- SubSaturate<int64_t>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
+ case Neon8: {
+ uint8_t src[16];
+ get_neon_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ std::swap(src[i * 4], src[i * 4 + 3]);
+ std::swap(src[i * 4 + 1], src[i * 4 + 2]);
+ }
+ set_neon_register(Vd, src);
+ break;
}
- } else {
- UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ break;
}
break;
}
- case 0x3: {
- // vcge/vcgt.s<size> Qd, Qm, Qn.
- bool ge = instr->Bit(4) == 1;
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ case Neon64: {
switch (size) {
- case Neon8:
- CompareGreater<int8_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ case Neon32: {
+ uint32_t src[4];
+ get_neon_register(Vm, src);
+ std::swap(src[0], src[1]);
+ std::swap(src[2], src[3]);
+ set_neon_register(Vd, src);
break;
- case Neon16:
- CompareGreater<int16_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ }
+ case Neon16: {
+ uint16_t src[8];
+ get_neon_register(Vm, src);
+ for (int i = 0; i < 2; i++) {
+ std::swap(src[i * 4], src[i * 4 + 3]);
+ std::swap(src[i * 4 + 1], src[i * 4 + 2]);
+ }
+ set_neon_register(Vd, src);
break;
- case Neon32:
- CompareGreater<int32_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ }
+ case Neon8: {
+ uint8_t src[16];
+ get_neon_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ std::swap(src[i], src[7 - i]);
+ std::swap(src[i + 8], src[15 - i]);
+ }
+ set_neon_register(Vd, src);
break;
+ }
default:
UNREACHABLE();
break;
}
break;
}
- case 0x4: {
- // vshl s<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (size == 0 && opc1 == 0b10 && opc2 == 0) {
+ if (instr->Bit(6) == 0) {
+ // vswp Dd, Dm.
+ uint64_t dval, mval;
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ get_d_register(vd, &dval);
+ get_d_register(vm, &mval);
+ set_d_register(vm, &dval);
+ set_d_register(vd, &mval);
+ } else {
+ // vswp Qd, Qm.
+ uint32_t dval[4], mval[4];
+ int vd = instr->VFPDRegValue(kSimd128Precision);
+ int vm = instr->VFPMRegValue(kSimd128Precision);
+ get_neon_register(vd, dval);
+ get_neon_register(vm, mval);
+ set_neon_register(vm, dval);
+ set_neon_register(vd, mval);
+ }
+ } else if (opc1 == 0 && opc2 == 0b1011) {
+ // vmvn Qd, Qm.
+ int vd = instr->VFPDRegValue(kSimd128Precision);
+ int vm = instr->VFPMRegValue(kSimd128Precision);
+ uint32_t q_data[4];
+ get_neon_register(vm, q_data);
+ for (int i = 0; i < 4; i++) q_data[i] = ~q_data[i];
+ set_neon_register(vd, q_data);
+ } else if (opc1 == 0b01 && (opc2 & 0b0111) == 0b110) {
+ // vabs<type>.<size> Qd, Qm
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ if (instr->Bit(10) != 0) {
+ // floating point (clear sign bits)
+ uint32_t src[4];
+ get_neon_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ src[i] &= ~0x80000000;
+ }
+ set_neon_register(Vd, src);
+ } else {
+ // signed integer
+ switch (size) {
+ case Neon8:
+ Abs<int8_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ case Neon16:
+ Abs<int16_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ case Neon32:
+ Abs<int32_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ }
+ } else if (opc1 == 0b01 && (opc2 & 0b0111) == 0b111) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ // vneg<type>.<size> Qd, Qm (signed integer)
+ if (instr->Bit(10) != 0) {
+ // floating point (toggle sign bits)
+ uint32_t src[4];
+ get_neon_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ src[i] ^= 0x80000000;
+ }
+ set_neon_register(Vd, src);
+ } else {
+ // signed integer
+ switch (size) {
+ case Neon8:
+ Neg<int8_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ case Neon16:
+ Neg<int16_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ case Neon32:
+ Neg<int32_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ }
+ } else if (opc1 == 0b10 && opc2 == 0b0001) {
+ if (q) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ // vtrn.<size> Qd, Qm.
+ switch (size) {
+ case Neon8:
+ Transpose<uint8_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ case Neon16:
+ Transpose<uint16_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ case Neon32:
+ Transpose<uint32_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ // vtrn.<size> Dd, Dm.
+ switch (size) {
+ case Neon8:
+ Transpose<uint8_t, kDoubleSize>(this, Vd, Vm);
+ break;
+ case Neon16:
+ Transpose<uint16_t, kDoubleSize>(this, Vd, Vm);
+ break;
+ case Neon32:
+ Transpose<uint32_t, kDoubleSize>(this, Vd, Vm);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ } else if (opc1 == 0b10 && (opc2 & 0b1110) == 0b0010) {
+ NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
+ if (q) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ if (instr->Bit(7) == 1) {
+ // vzip.<size> Qd, Qm.
switch (size) {
case Neon8:
- ShiftByRegister<int8_t, int8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ Zip<uint8_t, kSimd128Size>(this, Vd, Vm);
break;
case Neon16:
- ShiftByRegister<int16_t, int16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ Zip<uint16_t, kSimd128Size>(this, Vd, Vm);
break;
case Neon32:
- ShiftByRegister<int32_t, int32_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon64:
- ShiftByRegister<int64_t, int64_t, kSimd128Size>(this, Vd, Vm, Vn);
+ Zip<uint32_t, kSimd128Size>(this, Vd, Vm);
break;
default:
UNREACHABLE();
break;
}
- break;
- }
- case 0x6: {
- // vmin/vmax.s<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- bool min = instr->Bit(4) != 0;
+ } else {
+ // vuzp.<size> Qd, Qm.
switch (size) {
case Neon8:
- MinMax<int8_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ Unzip<uint8_t, kSimd128Size>(this, Vd, Vm);
break;
case Neon16:
- MinMax<int16_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ Unzip<uint16_t, kSimd128Size>(this, Vd, Vm);
break;
case Neon32:
- MinMax<int32_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ Unzip<uint32_t, kSimd128Size>(this, Vd, Vm);
break;
default:
UNREACHABLE();
break;
}
- break;
}
- case 0x8: {
- // vadd/vtst
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- if (instr->Bit(4) == 0) {
- // vadd.i<size> Qd, Qm, Qn.
- switch (size) {
- case Neon8:
- Add<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- Add<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- Add<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon64:
- Add<uint64_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- }
- } else {
- // vtst.i<size> Qd, Qm, Qn.
- switch (size) {
- case Neon8:
- Test<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- Test<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- Test<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- break;
- }
- case 0x9: {
- if (instr->Bit(6) == 1 && instr->Bit(4) == 1) {
- // vmul.i<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- Mul<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- Mul<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- Mul<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- UNIMPLEMENTED();
- }
- break;
- }
- case 0xA: {
- // vpmin/vpmax.s<size> Dd, Dm, Dn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- bool min = instr->Bit(4) != 0;
+ } else {
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ if (instr->Bit(7) == 1) {
+ // vzip.<size> Dd, Dm.
switch (size) {
case Neon8:
- PairwiseMinMax<int8_t>(this, Vd, Vm, Vn, min);
+ Zip<uint8_t, kDoubleSize>(this, Vd, Vm);
break;
case Neon16:
- PairwiseMinMax<int16_t>(this, Vd, Vm, Vn, min);
+ Zip<uint16_t, kDoubleSize>(this, Vd, Vm);
break;
case Neon32:
- PairwiseMinMax<int32_t>(this, Vd, Vm, Vn, min);
+ UNIMPLEMENTED();
break;
default:
UNREACHABLE();
break;
}
- break;
- }
- case 0xB: {
- // vpadd.i<size> Dd, Dm, Dn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ } else {
+ // vuzp.<size> Dd, Dm.
switch (size) {
case Neon8:
- PairwiseAdd<int8_t>(this, Vd, Vm, Vn);
+ Unzip<uint8_t, kDoubleSize>(this, Vd, Vm);
break;
case Neon16:
- PairwiseAdd<int16_t>(this, Vd, Vm, Vn);
+ Unzip<uint16_t, kDoubleSize>(this, Vd, Vm);
break;
case Neon32:
- PairwiseAdd<int32_t>(this, Vd, Vm, Vn);
+ UNIMPLEMENTED();
break;
default:
UNREACHABLE();
break;
}
- break;
}
- case 0xD: {
- if (instr->Bit(4) == 0) {
- float src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- if (instr->Bit(21) == 0) {
- // vadd.f32 Qd, Qm, Qn.
- src1[i] = src1[i] + src2[i];
- } else {
- // vsub.f32 Qd, Qm, Qn.
- src1[i] = src1[i] - src2[i];
- }
- }
- set_neon_register(Vd, src1);
+ }
+ } else if (opc1 == 0b10 && (opc2 & 0b1110) == 0b0100) {
+ // vqmovn.<type><size> Dd, Qm.
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
+ bool dst_unsigned = instr->Bit(6) != 0;
+ bool src_unsigned = instr->Bits(7, 6) == 0b11;
+ DCHECK_IMPLIES(src_unsigned, dst_unsigned);
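+ // Bits 7:6 select the variant: vqmovn.s (signed), vqmovun (signed to
+ // unsigned), or vqmovn.u (unsigned).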
+ switch (size) {
+ case Neon8: {
+ if (src_unsigned) {
+ SaturatingNarrow<uint16_t, uint8_t>(this, Vd, Vm);
+ } else if (dst_unsigned) {
+ SaturatingUnsignedNarrow<int16_t, uint8_t>(this, Vd, Vm);
} else {
- UNIMPLEMENTED();
+ SaturatingNarrow<int16_t, int8_t>(this, Vd, Vm);
}
break;
}
- case 0xE: {
- if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
- // vceq.f32.
- float src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- uint32_t dst[4];
- for (int i = 0; i < 4; i++) {
- dst[i] = (src1[i] == src2[i]) ? 0xFFFFFFFF : 0;
- }
- set_neon_register(Vd, dst);
+ case Neon16: {
+ if (src_unsigned) {
+ SaturatingNarrow<uint32_t, uint16_t>(this, Vd, Vm);
+ } else if (dst_unsigned) {
+ SaturatingUnsignedNarrow<int32_t, uint16_t>(this, Vd, Vm);
} else {
- UNIMPLEMENTED();
+ SaturatingNarrow<int32_t, int16_t>(this, Vd, Vm);
}
break;
}
- case 0xF: {
- if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
- float src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- if (instr->Bit(4) == 1) {
- if (instr->Bit(21) == 0) {
- // vrecps.f32 Qd, Qm, Qn.
- for (int i = 0; i < 4; i++) {
- src1[i] = 2.0f - src1[i] * src2[i];
- }
- } else {
- // vrsqrts.f32 Qd, Qm, Qn.
- for (int i = 0; i < 4; i++) {
- src1[i] = (3.0f - src1[i] * src2[i]) * 0.5f;
- }
- }
- } else {
- // vmin/vmax.f32 Qd, Qm, Qn.
- bool min = instr->Bit(21) == 1;
- bool saved = FPSCR_default_NaN_mode_;
- FPSCR_default_NaN_mode_ = true;
- for (int i = 0; i < 4; i++) {
- // vmin returns default NaN if any input is NaN.
- src1[i] = canonicalizeNaN(MinMax(src1[i], src2[i], min));
- }
- FPSCR_default_NaN_mode_ = saved;
- }
- set_neon_register(Vd, src1);
+ case Neon32: {
+ if (src_unsigned) {
+ SaturatingNarrow<uint64_t, uint32_t>(this, Vd, Vm);
+ } else if (dst_unsigned) {
+ SaturatingUnsignedNarrow<int64_t, uint32_t>(this, Vd, Vm);
} else {
- UNIMPLEMENTED();
+ SaturatingNarrow<int64_t, int32_t>(this, Vd, Vm);
}
break;
}
@@ -4630,104 +4653,197 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
UNIMPLEMENTED();
break;
}
- break;
- }
- case 5:
- if (instr->Bit(23) == 1 && instr->Bits(21, 19) == 0 &&
- instr->Bit(7) == 0 && instr->Bit(4) == 1) {
- // One register and a modified immediate value, see ARM DDI 0406C.d
- // A7.4.6. Handles vmov, vorr, vmvn, vbic.
- // Only handle vmov.i32 for now.
- VmovImmediate(this, instr);
- } else if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
- (instr->Bit(4) == 1)) {
- // vmovl signed
- if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- int imm3 = instr->Bits(21, 19);
- switch (imm3) {
- case 1:
- Widen<int8_t, int16_t>(this, Vd, Vm);
- break;
- case 2:
- Widen<int16_t, int32_t>(this, Vd, Vm);
- break;
- case 4:
- Widen<int32_t, int64_t>(this, Vd, Vm);
- break;
- default:
- UNIMPLEMENTED();
- break;
+ } else if (opc1 == 0b10 && instr->Bit(10) == 1) {
+ // vrint<q>.<dt> <Dd>, <Dm>
+ // vrint<q>.<dt> <Qd>, <Qm>
+ // See F6.1.205
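+ // Bit 6 (Q) selects the 128-bit form, which is processed here as two
+ // D registers.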
+ int regs = instr->Bit(6) + 1;
+ int rounding_mode = instr->Bits(9, 7);
+ float (*fproundint)(float) = nullptr;
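+ // Bits 9:7 select the rounding helper: round-to-nearest, toward zero,
+ // toward -inf, or toward +inf; other encodings are unimplemented.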
+ switch (rounding_mode) {
+ case 0:
+ fproundint = &nearbyintf;
+ break;
+ case 3:
+ fproundint = &truncf;
+ break;
+ case 5:
+ fproundint = &floorf;
+ break;
+ case 7:
+ fproundint = &ceilf;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+
+ float floats[2];
+ for (int r = 0; r < regs; r++) {
+ // We cannot simply use GetVFPSingleValue since our Q registers
+ // might not map to any S registers at all.
+ get_neon_register<float, kDoubleSize>(vm + r, floats);
+ for (int e = 0; e < 2; e++) {
+ floats[e] = canonicalizeNaN(fproundint(floats[e]));
}
- } else if (instr->Bits(21, 20) == 3 && instr->Bit(4) == 0) {
- // vext.
- int imm4 = instr->Bits(11, 8);
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- uint8_t src1[16], src2[16], dst[16];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- int boundary = kSimd128Size - imm4;
- int i = 0;
- for (; i < boundary; i++) {
- dst[i] = src1[i + imm4];
+ set_neon_register<float, kDoubleSize>(vd + r, floats);
+ }
+ } else if (opc1 == 0b11 && (opc2 & 0b1100) == 0b1000) {
+ // vrecpe/vrsqrte.f32 Qd, Qm.
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ uint32_t src[4];
+ get_neon_register(Vm, src);
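+ // Bit 7 distinguishes vrecpe (0) from vrsqrte (1); the simulator
+ // computes exact values rather than hardware estimates.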
+ if (instr->Bit(7) == 0) {
+ for (int i = 0; i < 4; i++) {
+ float denom = bit_cast<float>(src[i]);
+ div_zero_vfp_flag_ = (denom == 0);
+ float result = 1.0f / denom;
+ result = canonicalizeNaN(result);
+ src[i] = bit_cast<uint32_t>(result);
}
- for (; i < 16; i++) {
- dst[i] = src2[i - boundary];
+ } else {
+ for (int i = 0; i < 4; i++) {
+ float radicand = bit_cast<float>(src[i]);
+ float result = 1.0f / std::sqrt(radicand);
+ result = canonicalizeNaN(result);
+ src[i] = bit_cast<uint32_t>(result);
}
- set_neon_register(Vd, dst);
- } else if (instr->Bits(11, 8) == 5 && instr->Bit(4) == 1) {
- // vshl.i<size> Qd, Qm, shift
- int imm7 = instr->Bits(21, 16);
- if (instr->Bit(7) != 0) imm7 += 64;
- int size = base::bits::RoundDownToPowerOfTwo32(imm7);
- int shift = imm7 - size;
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- NeonSize ns =
- static_cast<NeonSize>(base::bits::WhichPowerOfTwo(size >> 3));
- switch (ns) {
- case Neon8:
- ShiftLeft<uint8_t, kSimd128Size>(this, Vd, Vm, shift);
+ }
+ set_neon_register(Vd, src);
+ } else if (opc1 == 0b11 && (opc2 & 0b1100) == 0b1100) {
+ // vcvt.<Td>.<Tm> Qd, Qm.
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ uint32_t q_data[4];
+ get_neon_register(Vm, q_data);
+ int op = instr->Bits(8, 7);
+ for (int i = 0; i < 4; i++) {
+ switch (op) {
+ case 0:
+ // f32 <- s32, round towards nearest.
+ q_data[i] = bit_cast<uint32_t>(
+ std::round(static_cast<float>(bit_cast<int32_t>(q_data[i]))));
break;
- case Neon16:
- ShiftLeft<uint16_t, kSimd128Size>(this, Vd, Vm, shift);
+ case 1:
+ // f32 <- u32, round towards nearest.
+ q_data[i] =
+ bit_cast<uint32_t>(std::round(static_cast<float>(q_data[i])));
break;
- case Neon32:
- ShiftLeft<uint32_t, kSimd128Size>(this, Vd, Vm, shift);
+ case 2:
+ // s32 <- f32, round to zero.
+ q_data[i] = static_cast<uint32_t>(
+ ConvertDoubleToInt(bit_cast<float>(q_data[i]), false, RZ));
break;
- case Neon64:
- ShiftLeft<uint64_t, kSimd128Size>(this, Vd, Vm, shift);
+ case 3:
+ // u32 <- f32, round to zero.
+ q_data[i] = static_cast<uint32_t>(
+ ConvertDoubleToInt(bit_cast<float>(q_data[i]), true, RZ));
break;
}
- } else if (instr->Bits(11, 8) == 0 && instr->Bit(4) == 1) {
- // vshr.s<size> Qd, Qm, shift
- int imm7 = instr->Bits(21, 16);
- if (instr->Bit(7) != 0) imm7 += 64;
- int size = base::bits::RoundDownToPowerOfTwo32(imm7);
- int shift = 2 * size - imm7;
+ }
+ set_neon_register(Vd, q_data);
+ }
+ } else if (op0 && op1 == 0b11 && op2 == 0b10) {
+ // vtb[l,x] Dd, <list>, Dm.
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ int vn = instr->VFPNRegValue(kDoublePrecision);
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ int table_len = (instr->Bits(9, 8) + 1) * kDoubleSize;
+ bool vtbx = instr->Bit(6) != 0; // vtbl / vtbx
+ uint64_t destination = 0, indices = 0, result = 0;
+ get_d_register(vd, &destination);
+ get_d_register(vm, &indices);
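+ // Each byte of Dm indexes into the table; out-of-range indices yield 0
+ // for vtbl and leave the destination byte unchanged for vtbx.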
+ for (int i = 0; i < kDoubleSize; i++) {
+ int shift = i * kBitsPerByte;
+ int index = (indices >> shift) & 0xFF;
+ if (index < table_len) {
+ uint64_t table;
+ get_d_register(vn + index / kDoubleSize, &table);
+ result |= ((table >> ((index % kDoubleSize) * kBitsPerByte)) & 0xFF)
+ << shift;
+ } else if (vtbx) {
+ result |= destination & (0xFFull << shift);
+ }
+ }
+ set_d_register(vd, &result);
+ } else if (op0 && op1 == 0b11 && op2 == 0b11) {
+ // Advanced SIMD duplicate (scalar)
+ if (instr->Bits(9, 7) == 0) {
+ // vdup.<size> Dd, Dm[index].
+ // vdup.<size> Qd, Dm[index].
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ int imm4 = instr->Bits(19, 16);
+ int size = 0, index = 0, mask = 0;
+ if ((imm4 & 0x1) != 0) {
+ size = 8;
+ index = imm4 >> 1;
+ mask = 0xFFu;
+ } else if ((imm4 & 0x2) != 0) {
+ size = 16;
+ index = imm4 >> 2;
+ mask = 0xFFFFu;
+ } else {
+ size = 32;
+ index = imm4 >> 3;
+ mask = 0xFFFFFFFFu;
+ }
+ uint64_t d_data;
+ get_d_register(vm, &d_data);
+ uint32_t scalar = (d_data >> (size * index)) & mask;
+ uint32_t duped = scalar;
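+ // Replicate the scalar across a 32-bit word, then broadcast that word
+ // to every lane of the destination.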
+ for (int i = 1; i < 32 / size; i++) {
+ scalar <<= size;
+ duped |= scalar;
+ }
+ uint32_t result[4] = {duped, duped, duped, duped};
+ if (instr->Bit(6) == 0) {
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ set_d_register(vd, result);
+ } else {
+ int vd = instr->VFPDRegValue(kSimd128Precision);
+ set_neon_register(vd, result);
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ } else if (op1 != 0b11 && !op3) {
+ // Advanced SIMD three registers of different lengths.
+ int u = instr->Bit(24);
+ int opc = instr->Bits(11, 8);
+ if (opc == 0b1000) {
+ // vmlal.u<size> Qd, Dn, Dm
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ if (size != Neon32) UNIMPLEMENTED();
+
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ uint64_t src1, src2, dst[2];
+
+ get_neon_register<uint64_t>(Vd, dst);
+ get_d_register(Vn, &src1);
+ get_d_register(Vm, &src2);
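+ // Widening multiply-accumulate: the low and high 32-bit lanes of Dn and
+ // Dm each add a 64-bit product into Qd.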
+ dst[0] += (src1 & 0xFFFFFFFFULL) * (src2 & 0xFFFFFFFFULL);
+ dst[1] += (src1 >> 32) * (src2 >> 32);
+ set_neon_register<uint64_t>(Vd, dst);
+ } else if (opc == 0b1100) {
+ if (u) {
+ // vmull.u<size> Qd, Dn, Dm
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- NeonSize ns =
- static_cast<NeonSize>(base::bits::WhichPowerOfTwo(size >> 3));
- switch (ns) {
- case Neon8:
- ArithmeticShiftRight<int8_t, kSimd128Size>(this, Vd, Vm, shift);
- break;
- case Neon16:
- ArithmeticShiftRight<int16_t, kSimd128Size>(this, Vd, Vm, shift);
- break;
- case Neon32:
- ArithmeticShiftRight<int32_t, kSimd128Size>(this, Vd, Vm, shift);
- break;
- case Neon64:
- ArithmeticShiftRight<int64_t, kSimd128Size>(this, Vd, Vm, shift);
+ int Vn = instr->VFPNRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ switch (size) {
+ case Neon32: {
+ MultiplyLong<uint32_t, uint64_t>(this, Vd, Vn, Vm);
break;
+ }
+ default:
+ UNIMPLEMENTED();
}
- } else if (instr->Bits(11, 8) == 0xC && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
+ } else {
// vmull.s<size> Qd, Dn, Dm
NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
@@ -4741,920 +4857,716 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
default:
UNIMPLEMENTED();
}
- } else {
- UNIMPLEMENTED();
}
- break;
- case 6: {
- int Vd, Vm, Vn;
- if (instr->Bit(6) == 0) {
- Vd = instr->VFPDRegValue(kDoublePrecision);
- Vm = instr->VFPMRegValue(kDoublePrecision);
- Vn = instr->VFPNRegValue(kDoublePrecision);
- } else {
- Vd = instr->VFPDRegValue(kSimd128Precision);
- Vm = instr->VFPMRegValue(kSimd128Precision);
- Vn = instr->VFPNRegValue(kSimd128Precision);
- }
- switch (instr->Bits(11, 8)) {
- case 0x0: {
- if (instr->Bit(4) == 1) {
- // vqadd.u<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- AddSaturate<uint8_t>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- AddSaturate<uint16_t>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- AddSaturate<uint32_t>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- UNIMPLEMENTED();
- }
- break;
- }
- case 0x1: {
- if (instr->Bits(21, 20) == 1 && instr->Bit(4) == 1) {
- // vbsl.size Qd, Qm, Qn.
- uint32_t dst[4], src1[4], src2[4];
- get_neon_register(Vd, dst);
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- dst[i] = (dst[i] & src1[i]) | (~dst[i] & src2[i]);
- }
- set_neon_register(Vd, dst);
- } else if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 1) {
- if (instr->Bit(6) == 0) {
- // veor Dd, Dn, Dm
- uint64_t src1, src2;
- get_d_register(Vn, &src1);
- get_d_register(Vm, &src2);
- src1 ^= src2;
- set_d_register(Vd, &src1);
+ }
+ } else if (op1 != 0b11 && op3) {
+ // The instructions specified by this encoding are not used in V8.
+ UNIMPLEMENTED();
+ } else {
+ UNIMPLEMENTED();
+ }
+}
- } else {
- // veor Qd, Qn, Qm
- uint32_t src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- for (int i = 0; i < 4; i++) src1[i] ^= src2[i];
- set_neon_register(Vd, src1);
- }
- } else if (instr->Bit(4) == 0) {
- if (instr->Bit(6) == 0) {
- // vrhadd.u<size> Dd, Dm, Dn.
- UNIMPLEMENTED();
- }
- // vrhadd.u<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- RoundingAverageUnsigned<uint8_t>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- RoundingAverageUnsigned<uint16_t>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- RoundingAverageUnsigned<uint32_t>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- UNIMPLEMENTED();
- }
+void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
+ int op0 = instr->Bit(23);
+ int op1 = instr->Bit(4);
+
+ if (op0 == 0) {
+ // Advanced SIMD three registers of same length.
+ int u = instr->Bit(24);
+ int opc = instr->Bits(11, 8);
+ int q = instr->Bit(6);
+ int sz = instr->Bits(21, 20);
+ int Vd, Vm, Vn;
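+ // The Q bit selects quadword (128-bit) or doubleword (64-bit) operands.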
+ if (q) {
+ Vd = instr->VFPDRegValue(kSimd128Precision);
+ Vm = instr->VFPMRegValue(kSimd128Precision);
+ Vn = instr->VFPNRegValue(kSimd128Precision);
+ } else {
+ Vd = instr->VFPDRegValue(kDoublePrecision);
+ Vm = instr->VFPMRegValue(kDoublePrecision);
+ Vn = instr->VFPNRegValue(kDoublePrecision);
+ }
+
+ if (!u && opc == 0 && op1) {
+ // vqadd.s<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ AddSat<int8_t>(this, Vd, Vm, Vn);
break;
- }
- case 0x2: {
- if (instr->Bit(4) == 1) {
- // vqsub.u<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- SubSaturate<uint8_t>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- SubSaturate<uint16_t>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- SubSaturate<uint32_t>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- UNIMPLEMENTED();
- }
+ case Neon16:
+ AddSat<int16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ AddSat<int32_t>(this, Vd, Vm, Vn);
break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 1 && sz == 2 && q && op1) {
+ // vmov Qd, Qm.
+ // vorr Qd, Qm, Qn.
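+ // vmov Qd, Qm is encoded as vorr with both source registers equal, so
+ // Vm == Vn reduces to a plain copy.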
+ uint32_t src1[4];
+ get_neon_register(Vm, src1);
+ if (Vm != Vn) {
+ uint32_t src2[4];
+ get_neon_register(Vn, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = src1[i] | src2[i];
}
- case 0x3: {
- // vcge/vcgt.u<size> Qd, Qm, Qn.
- bool ge = instr->Bit(4) == 1;
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- CompareGreater<uint8_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
- break;
- case Neon16:
- CompareGreater<uint16_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
- break;
- case Neon32:
- CompareGreater<uint32_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
- break;
- default:
- UNREACHABLE();
- break;
- }
+ }
+ set_neon_register(Vd, src1);
+ } else if (!u && opc == 1 && sz == 0 && q && op1) {
+ // vand Qd, Qm, Qn.
+ uint32_t src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = src1[i] & src2[i];
+ }
+ set_neon_register(Vd, src1);
+ } else if (!u && opc == 1 && sz == 1 && q && op1) {
+ // vbic Qd, Qm, Qn.
+ uint32_t src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = src1[i] & ~src2[i];
+ }
+ set_neon_register(Vd, src1);
+ } else if (!u && opc == 2 && op1) {
+ // vqsub.s<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ SubSat<int8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ SubSat<int16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ SubSat<int32_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon64:
+ SubSat<int64_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 3) {
+ // vcge/vcgt.s<size> Qd, Qm, Qn.
+ bool ge = instr->Bit(4) == 1;
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ CompareGreater<int8_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ break;
+ case Neon16:
+ CompareGreater<int16_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ break;
+ case Neon32:
+ CompareGreater<int32_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 4 && !op1) {
+ // vshl.s<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ ShiftByRegister<int8_t, int8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ ShiftByRegister<int16_t, int16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ ShiftByRegister<int32_t, int32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon64:
+ ShiftByRegister<int64_t, int64_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 6) {
+ // vmin/vmax.s<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ bool min = instr->Bit(4) != 0;
+ switch (size) {
+ case Neon8:
+ MinMax<int8_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon16:
+ MinMax<int16_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon32:
+ MinMax<int32_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 8 && op1) {
+ // vtst.i<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ Test<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ Test<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ Test<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 8 && !op1) {
+ // vadd.i<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ Add<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ Add<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ Add<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon64:
+ Add<uint64_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ }
+ } else if (opc == 9 && op1) {
+ // vmul.i<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ Mul<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ Mul<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ Mul<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 0xA) {
+ // vpmin/vpmax.s<size> Dd, Dm, Dn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ bool min = instr->Bit(4) != 0;
+ switch (size) {
+ case Neon8:
+ PairwiseMinMax<int8_t>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon16:
+ PairwiseMinMax<int16_t>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon32:
+ PairwiseMinMax<int32_t>(this, Vd, Vm, Vn, min);
break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 0xB) {
+ // vpadd.i<size> Dd, Dm, Dn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ PairwiseAdd<int8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ PairwiseAdd<int16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ PairwiseAdd<int32_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 0xD && !op1) {
+ float src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ if (instr->Bit(21) == 0) {
+ // vadd.f32 Qd, Qm, Qn.
+ src1[i] = src1[i] + src2[i];
+ } else {
+ // vsub.f32 Qd, Qm, Qn.
+ src1[i] = src1[i] - src2[i];
}
- case 0x4: {
- // vshl s<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- ShiftByRegister<uint8_t, int8_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- ShiftByRegister<uint16_t, int16_t, kSimd128Size>(this, Vd, Vm,
- Vn);
- break;
- case Neon32:
- ShiftByRegister<uint32_t, int32_t, kSimd128Size>(this, Vd, Vm,
- Vn);
- break;
- case Neon64:
- ShiftByRegister<uint64_t, int64_t, kSimd128Size>(this, Vd, Vm,
- Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
+ }
+ set_neon_register(Vd, src1);
+ } else if (!u && opc == 0xE && !sz && !op1) {
+ // vceq.f32.
+ float src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ uint32_t dst[4];
+ for (int i = 0; i < 4; i++) {
+ dst[i] = (src1[i] == src2[i]) ? 0xFFFFFFFF : 0;
+ }
+ set_neon_register(Vd, dst);
+ } else if (!u && opc == 0xF && op1) {
+ float src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ if (instr->Bit(21) == 0) {
+ // vrecps.f32 Qd, Qm, Qn.
+ for (int i = 0; i < 4; i++) {
+ src1[i] = 2.0f - src1[i] * src2[i];
+ }
+ } else {
+ // vrsqrts.f32 Qd, Qm, Qn.
+ for (int i = 0; i < 4; i++) {
+ src1[i] = (3.0f - src1[i] * src2[i]) * 0.5f;
+ }
+ }
+ set_neon_register(Vd, src1);
+ } else if (!u && opc == 0xF && !op1) {
+ float src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ // vmin/vmax.f32 Qd, Qm, Qn.
+ bool min = instr->Bit(21) == 1;
+ bool saved = FPSCR_default_NaN_mode_;
+ FPSCR_default_NaN_mode_ = true;
+ for (int i = 0; i < 4; i++) {
+ // vmin returns default NaN if any input is NaN.
+ src1[i] = canonicalizeNaN(MinMax(src1[i], src2[i], min));
+ }
+ FPSCR_default_NaN_mode_ = saved;
+ set_neon_register(Vd, src1);
+ } else if (u && opc == 0 && op1) {
+ // vqadd.u<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ AddSat<uint8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ AddSat<uint16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ AddSat<uint32_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 1 && sz == 1 && op1) {
+ // vbsl.size Qd, Qm, Qn.
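+ // Bitwise select: bits set in Qd take the corresponding bit from Qn,
+ // cleared bits take it from Qm.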
+ uint32_t dst[4], src1[4], src2[4];
+ get_neon_register(Vd, dst);
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ dst[i] = (dst[i] & src1[i]) | (~dst[i] & src2[i]);
+ }
+ set_neon_register(Vd, dst);
+ } else if (u && opc == 1 && sz == 0 && !q && op1) {
+ // veor Dd, Dn, Dm
+ uint64_t src1, src2;
+ get_d_register(Vn, &src1);
+ get_d_register(Vm, &src2);
+ src1 ^= src2;
+ set_d_register(Vd, &src1);
+ } else if (u && opc == 1 && sz == 0 && q && op1) {
+ // veor Qd, Qn, Qm
+ uint32_t src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ for (int i = 0; i < 4; i++) src1[i] ^= src2[i];
+ set_neon_register(Vd, src1);
+ } else if (u && opc == 1 && !op1) {
+ // vrhadd.u<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ RoundingAverageUnsigned<uint8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ RoundingAverageUnsigned<uint16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ RoundingAverageUnsigned<uint32_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 2 && op1) {
+ // vqsub.u<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ SubSat<uint8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ SubSat<uint16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ SubSat<uint32_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 3) {
+ // vcge/vcgt.u<size> Qd, Qm, Qn.
+ bool ge = instr->Bit(4) == 1;
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ CompareGreater<uint8_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ break;
+ case Neon16:
+ CompareGreater<uint16_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ break;
+ case Neon32:
+ CompareGreater<uint32_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 4 && !op1) {
+ // vshl.u<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ ShiftByRegister<uint8_t, int8_t, kSimd128Size>(this, Vd, Vm, Vn);
break;
+ case Neon16:
+ ShiftByRegister<uint16_t, int16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ ShiftByRegister<uint32_t, int32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon64:
+ ShiftByRegister<uint64_t, int64_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 6) {
+ // vmin/vmax.u<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ bool min = instr->Bit(4) != 0;
+ switch (size) {
+ case Neon8:
+ MinMax<uint8_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon16:
+ MinMax<uint16_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon32:
+ MinMax<uint32_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 8 && !op1) {
+ // vsub.size Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ Sub<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ Sub<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ Sub<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon64:
+ Sub<uint64_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ }
+ } else if (u && opc == 8 && op1) {
+ // vceq.size Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ CompareEqual<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ CompareEqual<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ CompareEqual<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 0xA) {
+ // vpmin/vpmax.u<size> Dd, Dm, Dn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ bool min = instr->Bit(4) != 0;
+ switch (size) {
+ case Neon8:
+ PairwiseMinMax<uint8_t>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon16:
+ PairwiseMinMax<uint16_t>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon32:
+ PairwiseMinMax<uint32_t>(this, Vd, Vm, Vn, min);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 0xD && sz == 0 && q && op1) {
+ // vmul.f32 Qd, Qn, Qm
+ float src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = src1[i] * src2[i];
+ }
+ set_neon_register(Vd, src1);
+ } else if (u && opc == 0xD && sz == 0 && !q && !op1) {
+ // vpadd.f32 Dd, Dn, Dm
+ PairwiseAdd<float>(this, Vd, Vm, Vn);
+ } else if (u && opc == 0xE && !op1) {
+ // vcge/vcgt.f32 Qd, Qm, Qn
+ bool ge = instr->Bit(21) == 0;
+ float src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ uint32_t dst[4];
+ for (int i = 0; i < 4; i++) {
+ if (ge) {
+ dst[i] = src1[i] >= src2[i] ? 0xFFFFFFFFu : 0;
+ } else {
+ dst[i] = src1[i] > src2[i] ? 0xFFFFFFFFu : 0;
}
- case 0x6: {
- // vmin/vmax.u<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- bool min = instr->Bit(4) != 0;
- switch (size) {
+ }
+ set_neon_register(Vd, dst);
+ } else {
+ UNIMPLEMENTED();
+ }
+ return;
+ } else if (op0 == 1 && op1 == 0) {
+ DecodeAdvancedSIMDTwoOrThreeRegisters(instr);
+ } else if (op0 == 1 && op1 == 1) {
+ // Advanced SIMD shifts and immediate generation.
+ if (instr->Bits(21, 19) == 0 && instr->Bit(7) == 0) {
+ VmovImmediate(this, instr);
+ } else {
+ // Advanced SIMD two registers and shift amount.
+ int u = instr->Bit(24);
+ int imm3H = instr->Bits(21, 19);
+ int imm3L = instr->Bits(18, 16);
+ int opc = instr->Bits(11, 8);
+ int l = instr->Bit(7);
+ int q = instr->Bit(6);
+ int imm3H_L = imm3H << 1 | l;
+
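+ // The position of the leading 1 in L:imm6 encodes the element size;
+ // right shifts use (2 * size - imm) and left shifts (imm - size) as the
+ // shift count.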
+ if (imm3H_L != 0 && opc == 0) {
+ // vshr.s<size> Qd, Qm, shift
+ int imm7 = instr->Bits(21, 16);
+ if (instr->Bit(7) != 0) imm7 += 64;
+ int size = base::bits::RoundDownToPowerOfTwo32(imm7);
+ int shift = 2 * size - imm7;
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ NeonSize ns =
+ static_cast<NeonSize>(base::bits::WhichPowerOfTwo(size >> 3));
+ if (u) {
+ switch (ns) {
case Neon8:
- MinMax<uint8_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ ShiftRight<uint8_t, kSimd128Size>(this, Vd, Vm, shift);
break;
case Neon16:
- MinMax<uint16_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ ShiftRight<uint16_t, kSimd128Size>(this, Vd, Vm, shift);
break;
case Neon32:
- MinMax<uint32_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ ShiftRight<uint32_t, kSimd128Size>(this, Vd, Vm, shift);
break;
- default:
- UNREACHABLE();
+ case Neon64:
+ ShiftRight<uint64_t, kSimd128Size>(this, Vd, Vm, shift);
break;
}
- break;
- }
- case 0x8: {
- if (instr->Bit(4) == 0) {
- // vsub.size Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- Sub<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- Sub<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- Sub<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon64:
- Sub<uint64_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- }
- } else {
- // vceq.size Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- CompareEqual<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- CompareEqual<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- CompareEqual<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- break;
- }
- case 0xA: {
- // vpmin/vpmax.u<size> Dd, Dm, Dn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- bool min = instr->Bit(4) != 0;
- switch (size) {
+ } else {
+ switch (ns) {
case Neon8:
- PairwiseMinMax<uint8_t>(this, Vd, Vm, Vn, min);
+ ArithmeticShiftRight<int8_t, kSimd128Size>(this, Vd, Vm, shift);
break;
case Neon16:
- PairwiseMinMax<uint16_t>(this, Vd, Vm, Vn, min);
+ ArithmeticShiftRight<int16_t, kSimd128Size>(this, Vd, Vm, shift);
break;
case Neon32:
- PairwiseMinMax<uint32_t>(this, Vd, Vm, Vn, min);
+ ArithmeticShiftRight<int32_t, kSimd128Size>(this, Vd, Vm, shift);
break;
- default:
- UNREACHABLE();
+ case Neon64:
+ ArithmeticShiftRight<int64_t, kSimd128Size>(this, Vd, Vm, shift);
break;
}
- break;
}
- case 0xD: {
- if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vmul.f32 Qd, Qn, Qm
- float src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- src1[i] = src1[i] * src2[i];
- }
- set_neon_register(Vd, src1);
- } else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
- // vpadd.f32 Dd, Dn, Dm
- PairwiseAdd<float>(this, Vd, Vm, Vn);
- } else {
- UNIMPLEMENTED();
- }
- break;
- }
- case 0xE: {
- if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
- // vcge/vcgt.f32 Qd, Qm, Qn
- bool ge = instr->Bit(21) == 0;
- float src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- uint32_t dst[4];
- for (int i = 0; i < 4; i++) {
- if (ge) {
- dst[i] = src1[i] >= src2[i] ? 0xFFFFFFFFu : 0;
- } else {
- dst[i] = src1[i] > src2[i] ? 0xFFFFFFFFu : 0;
- }
- }
- set_neon_register(Vd, dst);
- } else {
- UNIMPLEMENTED();
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- break;
- }
- case 7:
- if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
- (instr->Bit(4) == 1)) {
- // vmovl unsigned
- if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- int imm3 = instr->Bits(21, 19);
- switch (imm3) {
- case 1:
- Widen<uint8_t, uint16_t>(this, Vd, Vm);
- break;
- case 2:
- Widen<uint16_t, uint32_t>(this, Vd, Vm);
- break;
- case 4:
- Widen<uint32_t, uint64_t>(this, Vd, Vm);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- } else if (instr->Opc1Value() == 7 && instr->Bit(4) == 0) {
- if (instr->Bits(19, 16) == 0xB && instr->Bits(11, 9) == 0x3 &&
- instr->Bit(6) == 1) {
- // vcvt.<Td>.<Tm> Qd, Qm.
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- uint32_t q_data[4];
- get_neon_register(Vm, q_data);
- int op = instr->Bits(8, 7);
- for (int i = 0; i < 4; i++) {
- switch (op) {
- case 0:
- // f32 <- s32, round towards nearest.
- q_data[i] = bit_cast<uint32_t>(std::round(
- static_cast<float>(bit_cast<int32_t>(q_data[i]))));
- break;
- case 1:
- // f32 <- u32, round towards nearest.
- q_data[i] = bit_cast<uint32_t>(
- std::round(static_cast<float>(q_data[i])));
- break;
- case 2:
- // s32 <- f32, round to zero.
- q_data[i] = static_cast<uint32_t>(
- ConvertDoubleToInt(bit_cast<float>(q_data[i]), false, RZ));
- break;
- case 3:
- // u32 <- f32, round to zero.
- q_data[i] = static_cast<uint32_t>(
- ConvertDoubleToInt(bit_cast<float>(q_data[i]), true, RZ));
- break;
- }
- }
- set_neon_register(Vd, q_data);
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
- if (instr->Bit(6) == 0) {
- // vswp Dd, Dm.
- uint64_t dval, mval;
- int vd = instr->VFPDRegValue(kDoublePrecision);
- int vm = instr->VFPMRegValue(kDoublePrecision);
- get_d_register(vd, &dval);
- get_d_register(vm, &mval);
- set_d_register(vm, &dval);
- set_d_register(vd, &mval);
- } else {
- // vswp Qd, Qm.
- uint32_t dval[4], mval[4];
- int vd = instr->VFPDRegValue(kSimd128Precision);
- int vm = instr->VFPMRegValue(kSimd128Precision);
- get_neon_register(vd, dval);
- get_neon_register(vm, mval);
- set_neon_register(vm, dval);
- set_neon_register(vd, mval);
- }
- } else if (instr->Bits(11, 7) == 0x18) {
- // vdup.<size> Dd, Dm[index].
- // vdup.<size> Qd, Dm[index].
- int vm = instr->VFPMRegValue(kDoublePrecision);
- int imm4 = instr->Bits(19, 16);
- int size = 0, index = 0, mask = 0;
- if ((imm4 & 0x1) != 0) {
- size = 8;
- index = imm4 >> 1;
- mask = 0xFFu;
- } else if ((imm4 & 0x2) != 0) {
- size = 16;
- index = imm4 >> 2;
- mask = 0xFFFFu;
- } else {
- size = 32;
- index = imm4 >> 3;
- mask = 0xFFFFFFFFu;
- }
- uint64_t d_data;
- get_d_register(vm, &d_data);
- uint32_t scalar = (d_data >> (size * index)) & mask;
- uint32_t duped = scalar;
- for (int i = 1; i < 32 / size; i++) {
- scalar <<= size;
- duped |= scalar;
- }
- uint32_t result[4] = {duped, duped, duped, duped};
- if (instr->Bit(6) == 0) {
- int vd = instr->VFPDRegValue(kDoublePrecision);
- set_d_register(vd, result);
- } else {
- int vd = instr->VFPDRegValue(kSimd128Precision);
- set_neon_register(vd, result);
- }
- } else if (instr->Bits(19, 16) == 0 && instr->Bits(11, 6) == 0x17) {
- // vmvn Qd, Qm.
- int vd = instr->VFPDRegValue(kSimd128Precision);
- int vm = instr->VFPMRegValue(kSimd128Precision);
- uint32_t q_data[4];
- get_neon_register(vm, q_data);
- for (int i = 0; i < 4; i++) q_data[i] = ~q_data[i];
- set_neon_register(vd, q_data);
- } else if (instr->Bits(11, 10) == 0x2) {
- // vtb[l,x] Dd, <list>, Dm.
- int vd = instr->VFPDRegValue(kDoublePrecision);
- int vn = instr->VFPNRegValue(kDoublePrecision);
- int vm = instr->VFPMRegValue(kDoublePrecision);
- int table_len = (instr->Bits(9, 8) + 1) * kDoubleSize;
- bool vtbx = instr->Bit(6) != 0; // vtbl / vtbx
- uint64_t destination = 0, indices = 0, result = 0;
- get_d_register(vd, &destination);
- get_d_register(vm, &indices);
- for (int i = 0; i < kDoubleSize; i++) {
- int shift = i * kBitsPerByte;
- int index = (indices >> shift) & 0xFF;
- if (index < table_len) {
- uint64_t table;
- get_d_register(vn + index / kDoubleSize, &table);
- result |=
- ((table >> ((index % kDoubleSize) * kBitsPerByte)) & 0xFF)
- << shift;
- } else if (vtbx) {
- result |= destination & (0xFFull << shift);
- }
- }
- set_d_register(vd, &result);
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 8) == 0x1) {
- NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
- if (instr->Bit(6) == 0) {
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- if (instr->Bit(7) == 1) {
- // vzip.<size> Dd, Dm.
- switch (size) {
- case Neon8:
- Zip<uint8_t, kDoubleSize>(this, Vd, Vm);
- break;
- case Neon16:
- Zip<uint16_t, kDoubleSize>(this, Vd, Vm);
- break;
- case Neon32:
- UNIMPLEMENTED();
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- // vuzp.<size> Dd, Dm.
- switch (size) {
- case Neon8:
- Unzip<uint8_t, kDoubleSize>(this, Vd, Vm);
- break;
- case Neon16:
- Unzip<uint16_t, kDoubleSize>(this, Vd, Vm);
- break;
- case Neon32:
- UNIMPLEMENTED();
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- } else {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- if (instr->Bit(7) == 1) {
- // vzip.<size> Qd, Qm.
- switch (size) {
- case Neon8:
- Zip<uint8_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon16:
- Zip<uint16_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon32:
- Zip<uint32_t, kSimd128Size>(this, Vd, Vm);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- // vuzp.<size> Qd, Qm.
- switch (size) {
- case Neon8:
- Unzip<uint8_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon16:
- Unzip<uint16_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon32:
- Unzip<uint32_t, kSimd128Size>(this, Vd, Vm);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- }
- } else if (instr->Bits(17, 16) == 0 && instr->Bits(11, 9) == 0) {
- // vrev<op>.size Qd, Qm
+ } else if (imm3H_L != 0 && imm3L == 0 && opc == 0b1010 && !q) {
+ if (u) {
+ // vmovl unsigned
+ if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
- NeonSize op = static_cast<NeonSize>(static_cast<int>(Neon64) -
- instr->Bits(8, 7));
- switch (op) {
- case Neon16: {
- DCHECK_EQ(Neon8, size);
- uint8_t src[16];
- get_neon_register(Vm, src);
- for (int i = 0; i < 16; i += 2) {
- std::swap(src[i], src[i + 1]);
- }
- set_neon_register(Vd, src);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ int imm3 = instr->Bits(21, 19);
+ switch (imm3) {
+ case 1:
+ Widen<uint8_t, uint16_t>(this, Vd, Vm);
break;
- }
- case Neon32: {
- switch (size) {
- case Neon16: {
- uint16_t src[8];
- get_neon_register(Vm, src);
- for (int i = 0; i < 8; i += 2) {
- std::swap(src[i], src[i + 1]);
- }
- set_neon_register(Vd, src);
- break;
- }
- case Neon8: {
- uint8_t src[16];
- get_neon_register(Vm, src);
- for (int i = 0; i < 4; i++) {
- std::swap(src[i * 4], src[i * 4 + 3]);
- std::swap(src[i * 4 + 1], src[i * 4 + 2]);
- }
- set_neon_register(Vd, src);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
+ case 2:
+ Widen<uint16_t, uint32_t>(this, Vd, Vm);
break;
- }
- case Neon64: {
- switch (size) {
- case Neon32: {
- uint32_t src[4];
- get_neon_register(Vm, src);
- std::swap(src[0], src[1]);
- std::swap(src[2], src[3]);
- set_neon_register(Vd, src);
- break;
- }
- case Neon16: {
- uint16_t src[8];
- get_neon_register(Vm, src);
- for (int i = 0; i < 2; i++) {
- std::swap(src[i * 4], src[i * 4 + 3]);
- std::swap(src[i * 4 + 1], src[i * 4 + 2]);
- }
- set_neon_register(Vd, src);
- break;
- }
- case Neon8: {
- uint8_t src[16];
- get_neon_register(Vm, src);
- for (int i = 0; i < 4; i++) {
- std::swap(src[i], src[7 - i]);
- std::swap(src[i + 8], src[15 - i]);
- }
- set_neon_register(Vd, src);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
+ case 4:
+ Widen<uint32_t, uint64_t>(this, Vd, Vm);
break;
- }
default:
- UNREACHABLE();
+ UNIMPLEMENTED();
break;
}
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0x1) {
- NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
- if (instr->Bit(6) == 0) {
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- // vtrn.<size> Dd, Dm.
- switch (size) {
- case Neon8:
- Transpose<uint8_t, kDoubleSize>(this, Vd, Vm);
- break;
- case Neon16:
- Transpose<uint16_t, kDoubleSize>(this, Vd, Vm);
- break;
- case Neon32:
- Transpose<uint32_t, kDoubleSize>(this, Vd, Vm);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- // vtrn.<size> Qd, Qm.
- switch (size) {
- case Neon8:
- Transpose<uint8_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon16:
- Transpose<uint16_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon32:
- Transpose<uint32_t, kSimd128Size>(this, Vd, Vm);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- } else if (instr->Bits(17, 16) == 0x1 && instr->Bit(11) == 0) {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
- if (instr->Bits(9, 6) == 0xD) {
- // vabs<type>.<size> Qd, Qm
- if (instr->Bit(10) != 0) {
- // floating point (clear sign bits)
- uint32_t src[4];
- get_neon_register(Vm, src);
- for (int i = 0; i < 4; i++) {
- src[i] &= ~0x80000000;
- }
- set_neon_register(Vd, src);
- } else {
- // signed integer
- switch (size) {
- case Neon8:
- Abs<int8_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon16:
- Abs<int16_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon32:
- Abs<int32_t, kSimd128Size>(this, Vd, Vm);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- }
- } else if (instr->Bits(9, 6) == 0xF) {
- // vneg<type>.<size> Qd, Qm (signed integer)
- if (instr->Bit(10) != 0) {
- // floating point (toggle sign bits)
- uint32_t src[4];
- get_neon_register(Vm, src);
- for (int i = 0; i < 4; i++) {
- src[i] ^= 0x80000000;
- }
- set_neon_register(Vd, src);
- } else {
- // signed integer
- switch (size) {
- case Neon8:
- Neg<int8_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon16:
- Neg<int16_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon32:
- Neg<int32_t, kSimd128Size>(this, Vd, Vm);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- }
- } else {
- UNIMPLEMENTED();
- }
- } else if (instr->Bits(19, 18) == 0x2 && instr->Bits(17, 16) == 0x3 &&
- instr->Bits(11, 8) == 0x5) {
- // vrecpe/vrsqrte.f32 Qd, Qm.
+ } else {
+ // vmovl signed
+ if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- uint32_t src[4];
- get_neon_register(Vm, src);
- if (instr->Bit(7) == 0) {
- for (int i = 0; i < 4; i++) {
- float denom = bit_cast<float>(src[i]);
- div_zero_vfp_flag_ = (denom == 0);
- float result = 1.0f / denom;
- result = canonicalizeNaN(result);
- src[i] = bit_cast<uint32_t>(result);
- }
- } else {
- for (int i = 0; i < 4; i++) {
- float radicand = bit_cast<float>(src[i]);
- float result = 1.0f / std::sqrt(radicand);
- result = canonicalizeNaN(result);
- src[i] = bit_cast<uint32_t>(result);
- }
- }
- set_neon_register(Vd, src);
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 8) == 0x2 &&
- instr->Bits(7, 6) != 0) {
- // vqmovn.<type><size> Dd, Qm.
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
- bool dst_unsigned = instr->Bit(6) != 0;
- bool src_unsigned = instr->Bits(7, 6) == 0b11;
- DCHECK_IMPLIES(src_unsigned, dst_unsigned);
- switch (size) {
- case Neon8: {
- if (src_unsigned) {
- SaturatingNarrow<uint16_t, uint8_t>(this, Vd, Vm);
- } else if (dst_unsigned) {
- SaturatingUnsignedNarrow<int16_t, uint8_t>(this, Vd, Vm);
- } else {
- SaturatingNarrow<int16_t, int8_t>(this, Vd, Vm);
- }
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ int imm3 = instr->Bits(21, 19);
+ switch (imm3) {
+ case 1:
+ Widen<int8_t, int16_t>(this, Vd, Vm);
break;
- }
- case Neon16: {
- if (src_unsigned) {
- SaturatingNarrow<uint32_t, uint16_t>(this, Vd, Vm);
- } else if (dst_unsigned) {
- SaturatingUnsignedNarrow<int32_t, uint16_t>(this, Vd, Vm);
- } else {
- SaturatingNarrow<int32_t, int16_t>(this, Vd, Vm);
- }
+ case 2:
+ Widen<int16_t, int32_t>(this, Vd, Vm);
break;
- }
- case Neon32: {
- if (src_unsigned) {
- SaturatingNarrow<uint64_t, uint32_t>(this, Vd, Vm);
- } else if (dst_unsigned) {
- SaturatingUnsignedNarrow<int64_t, uint32_t>(this, Vd, Vm);
- } else {
- SaturatingNarrow<int64_t, int32_t>(this, Vd, Vm);
- }
+ case 4:
+ Widen<int32_t, int64_t>(this, Vd, Vm);
break;
- }
default:
UNIMPLEMENTED();
break;
}
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bit(10) == 1) {
- // vrint<q>.<dt> <Dd>, <Dm>
- // vrint<q>.<dt> <Qd>, <Qm>
- // See F6.1.205
- int regs = instr->Bit(6) + 1;
- int rounding_mode = instr->Bits(9, 7);
- float (*fproundint)(float) = nullptr;
- switch (rounding_mode) {
- case 0:
- fproundint = &nearbyintf;
- break;
- case 3:
- fproundint = &truncf;
- break;
- case 5:
- fproundint = &floorf;
- break;
- case 7:
- fproundint = &ceilf;
- break;
- default:
- UNIMPLEMENTED();
- }
- int vm = instr->VFPMRegValue(kDoublePrecision);
- int vd = instr->VFPDRegValue(kDoublePrecision);
-
- float floats[2];
- for (int r = 0; r < regs; r++) {
- // We cannot simply use GetVFPSingleValue since our Q registers
- // might not map to any S registers at all.
- get_neon_register<float, kDoubleSize>(vm + r, floats);
- for (int e = 0; e < 2; e++) {
- floats[e] = canonicalizeNaN(fproundint(floats[e]));
- }
- set_neon_register<float, kDoubleSize>(vd + r, floats);
- }
- } else {
- UNIMPLEMENTED();
}
- } else if (instr->Bits(11, 8) == 0 && instr->Bit(4) == 1) {
- // vshr.u<size> Qd, Qm, shift
+ } else if (!u && imm3H_L != 0 && opc == 0b0101) {
+ // vshl.i<size> Qd, Qm, shift
int imm7 = instr->Bits(21, 16);
if (instr->Bit(7) != 0) imm7 += 64;
int size = base::bits::RoundDownToPowerOfTwo32(imm7);
- int shift = 2 * size - imm7;
+ int shift = imm7 - size;
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
NeonSize ns =
static_cast<NeonSize>(base::bits::WhichPowerOfTwo(size >> 3));
switch (ns) {
case Neon8:
- ShiftRight<uint8_t, kSimd128Size>(this, Vd, Vm, shift);
+ ShiftLeft<uint8_t, kSimd128Size>(this, Vd, Vm, shift);
break;
case Neon16:
- ShiftRight<uint16_t, kSimd128Size>(this, Vd, Vm, shift);
+ ShiftLeft<uint16_t, kSimd128Size>(this, Vd, Vm, shift);
break;
case Neon32:
- ShiftRight<uint32_t, kSimd128Size>(this, Vd, Vm, shift);
+ ShiftLeft<uint32_t, kSimd128Size>(this, Vd, Vm, shift);
break;
case Neon64:
- ShiftRight<uint64_t, kSimd128Size>(this, Vd, Vm, shift);
+ ShiftLeft<uint64_t, kSimd128Size>(this, Vd, Vm, shift);
break;
}
- } else if (instr->Bits(11, 8) == 0x5 && instr->Bit(6) == 0 &&
- instr->Bit(4) == 1) {
- // vsli.<size> Dd, Dm, shift
+ } else if (u && imm3H_L != 0 && opc == 0b0100) {
+ // vsri.<size> Dd, Dm, shift
int imm7 = instr->Bits(21, 16);
if (instr->Bit(7) != 0) imm7 += 64;
int size = base::bits::RoundDownToPowerOfTwo32(imm7);
- int shift = imm7 - size;
+ int shift = 2 * size - imm7;
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
switch (size) {
case 8:
- ShiftLeftAndInsert<uint8_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftRightAndInsert<uint8_t, kDoubleSize>(this, Vd, Vm, shift);
break;
case 16:
- ShiftLeftAndInsert<uint16_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftRightAndInsert<uint16_t, kDoubleSize>(this, Vd, Vm, shift);
break;
case 32:
- ShiftLeftAndInsert<uint32_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftRightAndInsert<uint32_t, kDoubleSize>(this, Vd, Vm, shift);
break;
case 64:
- ShiftLeftAndInsert<uint64_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftRightAndInsert<uint64_t, kDoubleSize>(this, Vd, Vm, shift);
break;
default:
UNREACHABLE();
break;
}
- } else if (instr->Bits(11, 8) == 0x4 && instr->Bit(6) == 0 &&
- instr->Bit(4) == 1) {
- // vsri.<size> Dd, Dm, shift
+ } else if (u && imm3H_L != 0 && opc == 0b0101) {
+ // vsli.<size> Dd, Dm, shift
int imm7 = instr->Bits(21, 16);
if (instr->Bit(7) != 0) imm7 += 64;
int size = base::bits::RoundDownToPowerOfTwo32(imm7);
- int shift = 2 * size - imm7;
+ int shift = imm7 - size;
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
switch (size) {
case 8:
- ShiftRightAndInsert<uint8_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftLeftAndInsert<uint8_t, kDoubleSize>(this, Vd, Vm, shift);
break;
case 16:
- ShiftRightAndInsert<uint16_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftLeftAndInsert<uint16_t, kDoubleSize>(this, Vd, Vm, shift);
break;
case 32:
- ShiftRightAndInsert<uint32_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftLeftAndInsert<uint32_t, kDoubleSize>(this, Vd, Vm, shift);
break;
case 64:
- ShiftRightAndInsert<uint64_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftLeftAndInsert<uint64_t, kDoubleSize>(this, Vd, Vm, shift);
break;
default:
UNREACHABLE();
break;
}
- } else if (instr->Bits(11, 8) == 0x8 && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
- // vmlal.u<size> Qd, Dn, Dm
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- if (size != Neon32) UNIMPLEMENTED();
+ }
+ }
+ return;
+ }
+}
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- uint64_t src1, src2, dst[2];
-
- get_neon_register<uint64_t>(Vd, dst);
- get_d_register(Vn, &src1);
- get_d_register(Vm, &src2);
- dst[0] += (src1 & 0xFFFFFFFFULL) * (src2 & 0xFFFFFFFFULL);
- dst[1] += (src1 >> 32) * (src2 >> 32);
- set_neon_register<uint64_t>(Vd, dst);
- } else if (instr->Bits(11, 8) == 0xC && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
- // vmull.u<size> Qd, Dn, Dm
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- switch (size) {
- case Neon32: {
- MultiplyLong<uint32_t, uint64_t>(this, Vd, Vn, Vm);
- break;
- }
- default:
- UNIMPLEMENTED();
- }
- } else if (instr->Bits(21, 19) == 0 && instr->Bit(7) == 0 &&
- instr->Bit(4) == 1) {
- // vmov (immediate), see ARM DDI 0487F.b F6.1.134, decoding A4.
- // Similar to vmov (immediate above), but when high bit of immediate is
- // set.
- VmovImmediate(this, instr);
+void Simulator::DecodeMemoryHintsAndBarriers(Instruction* instr) {
+ switch (instr->SpecialValue()) {
+ case 0xA:
+ case 0xB:
+ if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xF)) {
+ // pld: ignore instruction.
+ } else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
+ // dsb, dmb, isb: ignore instruction for now.
+ // TODO(binji): implement
+ // Also refer to the ARMv6 CP15 equivalents in DecodeTypeCP15.
} else {
UNIMPLEMENTED();
}
break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void Simulator::DecodeAdvancedSIMDElementOrStructureLoadStore(
+ Instruction* instr) {
+ switch (instr->SpecialValue()) {
case 8:
if (instr->Bits(21, 20) == 0) {
// vst1
@@ -5745,7 +5657,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
case 9: {
if (instr->Bits(21, 20) == 2) {
// Bits(11, 8) is the B field in A7.7 Advanced SIMD element or structure
- // load/store instructions.
+ // load/store instructions. See table A7-21.
if (instr->Bits(11, 8) == 0xC) {
// vld1 (single element to all lanes).
DCHECK_EQ(instr->Bits(11, 8), 0b1100); // Type field.
@@ -5791,6 +5703,53 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
set_register(Rn, get_register(Rn) + get_register(Rm));
}
}
+ } else if (instr->Bits(11, 8) == 8 ||
+ ((instr->Bits(11, 8) & 0b1011) == 0)) {
+ // vld1 (single element to one lane)
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int Rm = instr->VmValue();
+ int32_t address = get_register(Rn);
+ int size = instr->Bits(11, 10);
+ uint64_t dreg;
+ get_d_register(Vd, &dreg);
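+ // Only the addressed lane of Dd is replaced; the remaining lanes keep
+ // their previous contents.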
+ switch (size) {
+ case Neon8: {
+ uint64_t data = ReadBU(address);
+ DCHECK_EQ(0, instr->Bit(4));
+ int i = instr->Bits(7, 5) * 8;
+ dreg = (dreg & ~(uint64_t{0xff} << i)) | (data << i);
+ break;
+ }
+ case Neon16: {
+ DCHECK_EQ(0, instr->Bits(5, 4)); // Alignment not supported.
+ uint64_t data = ReadHU(address);
+ int i = instr->Bits(7, 6) * 16;
+ dreg = (dreg & ~(uint64_t{0xffff} << i)) | (data << i);
+ break;
+ }
+ case Neon32: {
+ DCHECK_EQ(0, instr->Bits(6, 4)); // Alignment not supported.
+ uint64_t data = static_cast<unsigned>(ReadW(address));
+ int i = instr->Bit(7) * 32;
+ dreg = (dreg & ~(uint64_t{0xffffffff} << i)) | (data << i);
+ break;
+ }
+ case Neon64: {
+ // Should have been handled by vld1 (single element to all lanes).
+ UNREACHABLE();
+ }
+ }
+ set_d_register(Vd, &dreg);
+
+ // write back
+ if (Rm != 15) {
+ if (Rm == 13) {
+ set_register(Rn, address);
+ } else {
+ set_register(Rn, get_register(Rn) + get_register(Rm));
+ }
+ }
} else {
UNIMPLEMENTED();
}
@@ -5799,18 +5758,13 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xA:
- case 0xB:
- if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xF)) {
- // pld: ignore instruction.
- } else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
- // dsb, dmb, isb: ignore instruction for now.
- // TODO(binji): implement
- // Also refer to the ARMv6 CP15 equivalents in DecodeTypeCP15.
- } else {
- UNIMPLEMENTED();
- }
- break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void Simulator::DecodeFloatingPointDataProcessing(Instruction* instr) {
+ switch (instr->SpecialValue()) {
case 0x1D:
if (instr->Opc1Value() == 0x7 && instr->Opc3Value() == 0x1 &&
instr->Bits(11, 9) == 0x5 && instr->Bits(19, 18) == 0x2) {
@@ -5979,6 +5933,21 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
}
+void Simulator::DecodeSpecialCondition(Instruction* instr) {
+ int op0 = instr->Bits(25, 24);
+ int op1 = instr->Bits(11, 9);
+ int op2 = instr->Bit(4);
+
+ if (instr->Bit(27) == 0) {
+ DecodeUnconditional(instr);
+ } else if ((instr->Bits(27, 26) == 0b11) && (op0 == 0b10) &&
+ ((op1 >> 1) == 0b10) && !op2) {
+ DecodeFloatingPointDataProcessing(instr);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
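The new top-level dispatch in DecodeSpecialCondition() keys off a handful of fixed bit fields of the instruction word. A minimal standalone sketch of the same extraction logic, not part of the patch, with Bits()/Bit() rewritten as free functions and an arbitrary example encoding:

#include <cstdint>
#include <cstdio>

// Extract bits [hi:lo] (inclusive) of a 32-bit instruction word.
static uint32_t Bits(uint32_t instr, int hi, int lo) {
  return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
}
static uint32_t Bit(uint32_t instr, int pos) { return (instr >> pos) & 1; }

int main() {
  uint32_t instr = 0xF2800010;  // arbitrary example encoding
  uint32_t op0 = Bits(instr, 25, 24);
  uint32_t op1 = Bits(instr, 11, 9);
  uint32_t op2 = Bit(instr, 4);
  if (Bit(instr, 27) == 0) {
    std::printf("unconditional\n");
  } else if (Bits(instr, 27, 26) == 0b11 && op0 == 0b10 &&
             (op1 >> 1) == 0b10 && !op2) {
    std::printf("floating-point data processing\n");
  } else {
    std::printf("unimplemented\n");
  }
  return 0;
}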
diff --git a/deps/v8/src/execution/arm/simulator-arm.h b/deps/v8/src/execution/arm/simulator-arm.h
index e577e0f815..84f857d5da 100644
--- a/deps/v8/src/execution/arm/simulator-arm.h
+++ b/deps/v8/src/execution/arm/simulator-arm.h
@@ -386,6 +386,13 @@ class Simulator : public SimulatorBase {
void DecodeType6CoprocessorIns(Instruction* instr);
void DecodeSpecialCondition(Instruction* instr);
+ void DecodeFloatingPointDataProcessing(Instruction* instr);
+ void DecodeUnconditional(Instruction* instr);
+ void DecodeAdvancedSIMDDataProcessing(Instruction* instr);
+ void DecodeMemoryHintsAndBarriers(Instruction* instr);
+ void DecodeAdvancedSIMDElementOrStructureLoadStore(Instruction* instr);
+ void DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr);
+
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
diff --git a/deps/v8/src/execution/arm64/frame-constants-arm64.h b/deps/v8/src/execution/arm64/frame-constants-arm64.h
index 409fcec504..fba69f917d 100644
--- a/deps/v8/src/execution/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/execution/arm64/frame-constants-arm64.h
@@ -15,6 +15,7 @@ namespace internal {
// The layout of an EntryFrame is as follows:
//
+// BOTTOM OF THE STACK HIGHEST ADDRESS
// slot Entry frame
// +---------------------+-----------------------
// -20 | saved register d15 |
@@ -45,6 +46,7 @@ namespace internal {
// |- - - - - - - - - - -|
// 5 | padding | <-- stack ptr
// -----+---------------------+-----------------------
+// TOP OF THE STACK LOWEST ADDRESS
//
class EntryFrameConstants : public AllStatic {
public:
diff --git a/deps/v8/src/execution/arm64/pointer-auth-arm64.cc b/deps/v8/src/execution/arm64/pointer-auth-arm64.cc
index eaa88445ec..7f4eeeb0ac 100644
--- a/deps/v8/src/execution/arm64/pointer-auth-arm64.cc
+++ b/deps/v8/src/execution/arm64/pointer-auth-arm64.cc
@@ -232,6 +232,9 @@ uint64_t Simulator::AuthPAC(uint64_t ptr, uint64_t context, PACKey key,
} else {
int error_lsb = GetTopPACBit(ptr, type) - 2;
uint64_t error_mask = UINT64_C(0x3) << error_lsb;
+ if (FLAG_sim_abort_on_bad_auth) {
+ FATAL("Pointer authentication failure.");
+ }
return (original_ptr & ~error_mask) | (error_code << error_lsb);
}
}
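The failure path above flips the two bits just below the top PAC bit so that a later use of the pointer faults; with FLAG_sim_abort_on_bad_auth set, the simulator now aborts immediately instead. A standalone sketch of the bit arithmetic, not part of the patch, with an assumed top-PAC-bit position and arbitrary values:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t original_ptr = 0x0000004011223344;  // hypothetical pointer
  uint64_t error_code = 0x1;                   // hypothetical key-specific code
  int top_pac_bit = 55;                        // assumed GetTopPACBit() result
  int error_lsb = top_pac_bit - 2;
  uint64_t error_mask = UINT64_C(0x3) << error_lsb;
  uint64_t poisoned =
      (original_ptr & ~error_mask) | (error_code << error_lsb);
  std::printf("0x%016llx\n", static_cast<unsigned long long>(poisoned));
  return 0;
}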
diff --git a/deps/v8/src/execution/execution.cc b/deps/v8/src/execution/execution.cc
index d780074861..3da4cbdbaf 100644
--- a/deps/v8/src/execution/execution.cc
+++ b/deps/v8/src/execution/execution.cc
@@ -194,10 +194,10 @@ MaybeHandle<Context> NewScriptContext(Isolate* isolate,
// If envRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
// exception.
MessageLocation location(script, 0, 1);
- isolate->ThrowAt(isolate->factory()->NewSyntaxError(
- MessageTemplate::kVarRedeclaration, name),
- &location);
- return MaybeHandle<Context>();
+ return isolate->ThrowAt<Context>(
+ isolate->factory()->NewSyntaxError(
+ MessageTemplate::kVarRedeclaration, name),
+ &location);
}
}
}
@@ -216,10 +216,10 @@ MaybeHandle<Context> NewScriptContext(Isolate* isolate,
// ES#sec-globaldeclarationinstantiation 5.d:
// If hasRestrictedGlobal is true, throw a SyntaxError exception.
MessageLocation location(script, 0, 1);
- isolate->ThrowAt(isolate->factory()->NewSyntaxError(
- MessageTemplate::kVarRedeclaration, name),
- &location);
- return MaybeHandle<Context>();
+ return isolate->ThrowAt<Context>(
+ isolate->factory()->NewSyntaxError(
+ MessageTemplate::kVarRedeclaration, name),
+ &location);
}
JSGlobalObject::InvalidatePropertyCell(global_object, name);
diff --git a/deps/v8/src/execution/external-pointer-table.cc b/deps/v8/src/execution/external-pointer-table.cc
new file mode 100644
index 0000000000..5b199ae3cf
--- /dev/null
+++ b/deps/v8/src/execution/external-pointer-table.cc
@@ -0,0 +1,22 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/external-pointer-table.h"
+
+namespace v8 {
+namespace internal {
+
+void ExternalPointerTable::GrowTable(ExternalPointerTable* table) {
+ // TODO(v8:10391, saelo): overflow check here and in the multiplication below
+ uint32_t new_capacity = table->capacity_ + table->capacity_ / 2;
+ table->buffer_ = reinterpret_cast<Address*>(
+ realloc(table->buffer_, new_capacity * sizeof(Address)));
+ CHECK(table->buffer_);
+ memset(&table->buffer_[table->capacity_], 0,
+ (new_capacity - table->capacity_) * sizeof(Address));
+ table->capacity_ = new_capacity;
+}
+
+} // namespace internal
+} // namespace v8
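The TODO in GrowTable() above notes that neither the 1.5x capacity computation nor the byte-size multiplication is overflow-checked yet. A standalone sketch of what such a check could look like, not part of the patch, using uint64_t as a stand-in for Address:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

static uint32_t CheckedGrowCapacity(uint32_t capacity) {
  uint64_t new_capacity = static_cast<uint64_t>(capacity) + capacity / 2;
  // Guard both the stored 32-bit capacity and the later realloc byte count.
  if (new_capacity > UINT32_MAX ||
      new_capacity > SIZE_MAX / sizeof(uint64_t)) {
    std::fprintf(stderr, "ExternalPointerTable capacity overflow\n");
    std::abort();
  }
  return static_cast<uint32_t>(new_capacity);
}

int main() {
  std::printf("%u\n", CheckedGrowCapacity(1024));  // prints 1536
  return 0;
}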
diff --git a/deps/v8/src/execution/external-pointer-table.h b/deps/v8/src/execution/external-pointer-table.h
new file mode 100644
index 0000000000..7774a39248
--- /dev/null
+++ b/deps/v8/src/execution/external-pointer-table.h
@@ -0,0 +1,80 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_EXTERNAL_POINTER_TABLE_H_
+#define V8_EXECUTION_EXTERNAL_POINTER_TABLE_H_
+
+#include "src/common/external-pointer.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class V8_EXPORT_PRIVATE ExternalPointerTable {
+ public:
+ static const int kExternalPointerTableInitialCapacity = 1024;
+
+ ExternalPointerTable()
+ : buffer_(reinterpret_cast<Address*>(
+ calloc(kExternalPointerTableInitialCapacity, sizeof(Address)))),
+ length_(1),
+ capacity_(kExternalPointerTableInitialCapacity),
+ freelist_head_(0) {
+    // Explicitly set up the invalid nullptr entry.
+ STATIC_ASSERT(kNullExternalPointer == 0);
+ buffer_[kNullExternalPointer] = kNullAddress;
+ }
+
+ ~ExternalPointerTable() { ::free(buffer_); }
+
+ Address get(uint32_t index) const {
+ CHECK_LT(index, length_);
+ return buffer_[index];
+ }
+
+ void set(uint32_t index, Address value) {
+ DCHECK_NE(kNullExternalPointer, index);
+ CHECK_LT(index, length_);
+ buffer_[index] = value;
+ }
+
+ uint32_t allocate() {
+ uint32_t index = length_++;
+ if (index >= capacity_) {
+ GrowTable(this);
+ }
+ DCHECK_NE(kNullExternalPointer, index);
+ return index;
+ }
+
+ void free(uint32_t index) {
+    // TODO(v8:10391, saelo): implement a simple free list here, i.e. set
+    // buffer_[index] to freelist_head_ and set freelist_head_ to index.
+ DCHECK_NE(kNullExternalPointer, index);
+ }
+
+ // Returns true if the entry exists in the table and therefore it can be read.
+ bool is_valid_index(uint32_t index) const {
+ // TODO(v8:10391, saelo): also check here if entry is free
+ return index < length_;
+ }
+
+ uint32_t size() const { return length_; }
+
+ static void GrowTable(ExternalPointerTable* table);
+
+ private:
+ friend class Isolate;
+
+ Address* buffer_;
+ uint32_t length_;
+ uint32_t capacity_;
+ uint32_t freelist_head_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_EXTERNAL_POINTER_TABLE_H_
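The new class gives external (off-V8-heap) pointers a level of indirection: callers hold a 32-bit index into a per-isolate table rather than the raw address. A standalone toy version of that pattern, not V8 code, with the freelist and growth logic omitted:

#include <cstdint>
#include <cstdio>
#include <vector>

using Address = uintptr_t;

class ToyExternalPointerTable {
 public:
  ToyExternalPointerTable() : buffer_(1, 0) {}  // index 0 is the null entry

  uint32_t allocate() {
    buffer_.push_back(0);
    return static_cast<uint32_t>(buffer_.size() - 1);
  }
  void set(uint32_t index, Address value) { buffer_.at(index) = value; }
  Address get(uint32_t index) const { return buffer_.at(index); }

 private:
  std::vector<Address> buffer_;
};

int main() {
  ToyExternalPointerTable table;
  int external_object = 42;  // some off-heap object
  uint32_t handle = table.allocate();
  table.set(handle, reinterpret_cast<Address>(&external_object));
  // Only the 32-bit handle would be stored in a heap object; the raw pointer
  // is recovered through the table.
  auto* ptr = reinterpret_cast<int*>(table.get(handle));
  std::printf("handle %u -> %d\n", handle, *ptr);
  return 0;
}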
diff --git a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h
index 6c037451a2..1c0a1f65f0 100644
--- a/deps/v8/src/execution/frame-constants.h
+++ b/deps/v8/src/execution/frame-constants.h
@@ -21,18 +21,15 @@ namespace internal {
// header, with slot index 2 corresponding to the current function context and 3
// corresponding to the frame marker/JSFunction.
//
-// If V8_REVERSE_JSARGS is set, then the parameters are reversed in the stack,
-// i.e., the first parameter (the receiver) is just above the return address.
-//
// slot JS frame
// +-----------------+--------------------------------
-// -n-1 | parameter 0 | ^
+// -n-1 | parameter n | ^
// |- - - - - - - - -| |
-// -n | | Caller
+// -n | parameter n-1 | Caller
// ... | ... | frame slots
-// -2 | parameter n-1 | (slot < 0)
+// -2 | parameter 1 | (slot < 0)
// |- - - - - - - - -| |
-// -1 | parameter n | v
+// -1 | parameter 0 | v
// -----+-----------------+--------------------------------
// 0 | return addr | ^ ^
// |- - - - - - - - -| | |
@@ -59,7 +56,7 @@ class CommonFrameConstants : public AllStatic {
// Fixed part of the frame consists of return address, caller fp,
// constant pool (if FLAG_enable_embedded_constant_pool), context, and
- // function. StandardFrame::IterateExpressions assumes that kLastObjectOffset
+ // function. CommonFrame::IterateExpressions assumes that kLastObjectOffset
// is the last object pointer.
static constexpr int kFixedFrameSizeAboveFp = kPCOnStackSize + kFPOnStackSize;
static constexpr int kFixedSlotCountAboveFp =
@@ -82,13 +79,13 @@ class CommonFrameConstants : public AllStatic {
//
// slot JS frame
// +-----------------+--------------------------------
-// -n-1 | parameter 0 | ^
+// -n-1 | parameter n | ^
// |- - - - - - - - -| |
-// -n | | Caller
+// -n | parameter n-1 | Caller
// ... | ... | frame slots
-// -2 | parameter n-1 | (slot < 0)
+// -2 | parameter 1 | (slot < 0)
// |- - - - - - - - -| |
-// -1 | parameter n | v
+// -1 | parameter 0 | v
// -----+-----------------+--------------------------------
// 0 | return addr | ^ ^
// |- - - - - - - - -| | |
@@ -133,13 +130,13 @@ class StandardFrameConstants : public CommonFrameConstants {
//
// slot JS frame
// +-----------------+--------------------------------
-// -n-1 | parameter 0 | ^
+// -n-1 | parameter n | ^
// |- - - - - - - - -| |
-// -n | | Caller
+// -n | parameter n-1 | Caller
// ... | ... | frame slots
-// -2 | parameter n-1 | (slot < 0)
+// -2 | parameter 1 | (slot < 0)
// |- - - - - - - - -| |
-// -1 | parameter n | v
+// -1 | parameter 0 | v
// -----+-----------------+--------------------------------
// 0 | return addr | ^ ^
// |- - - - - - - - -| | |
@@ -305,18 +302,13 @@ class InterpreterFrameConstants : public StandardFrameConstants {
STANDARD_FRAME_EXTRA_PUSHED_VALUE_OFFSET(1);
DEFINE_STANDARD_FRAME_SIZES(2);
-#ifdef V8_REVERSE_JSARGS
static constexpr int kFirstParamFromFp =
StandardFrameConstants::kCallerSPOffset;
-#else
- static constexpr int kLastParamFromFp =
- StandardFrameConstants::kCallerSPOffset;
-#endif
static constexpr int kRegisterFileFromFp =
-kFixedFrameSizeFromFp - kSystemPointerSize;
static constexpr int kExpressionsOffset = kRegisterFileFromFp;
- // Expression index for {StandardFrame::GetExpressionAddress}.
+ // Expression index for {JavaScriptFrame::GetExpressionAddress}.
static constexpr int kBytecodeArrayExpressionIndex = -2;
static constexpr int kBytecodeOffsetExpressionIndex = -1;
static constexpr int kRegisterFileExpressionIndex = 0;
diff --git a/deps/v8/src/execution/frames-inl.h b/deps/v8/src/execution/frames-inl.h
index e56db9ee4a..3cee9d5855 100644
--- a/deps/v8/src/execution/frames-inl.h
+++ b/deps/v8/src/execution/frames-inl.h
@@ -65,7 +65,6 @@ inline StackFrame::StackFrame(StackFrameIteratorBase* iterator)
: iterator_(iterator), isolate_(iterator_->isolate()) {
}
-
inline StackHandler* StackFrame::top_handler() const {
return iterator_->handler();
}
@@ -95,22 +94,29 @@ inline Address* StackFrame::ResolveReturnAddressLocation(Address* pc_address) {
}
}
-inline NativeFrame::NativeFrame(StackFrameIteratorBase* iterator)
- : StackFrame(iterator) {}
+inline TypedFrame::TypedFrame(StackFrameIteratorBase* iterator)
+ : CommonFrame(iterator) {}
-inline Address NativeFrame::GetCallerStackPointer() const {
- return fp() + CommonFrameConstants::kCallerSPOffset;
-}
+inline CommonFrameWithJSLinkage::CommonFrameWithJSLinkage(
+ StackFrameIteratorBase* iterator)
+ : CommonFrame(iterator) {}
+
+inline TypedFrameWithJSLinkage::TypedFrameWithJSLinkage(
+ StackFrameIteratorBase* iterator)
+ : CommonFrameWithJSLinkage(iterator) {}
+
+inline NativeFrame::NativeFrame(StackFrameIteratorBase* iterator)
+ : TypedFrame(iterator) {}
inline EntryFrame::EntryFrame(StackFrameIteratorBase* iterator)
- : StackFrame(iterator) {}
+ : TypedFrame(iterator) {}
inline ConstructEntryFrame::ConstructEntryFrame(
StackFrameIteratorBase* iterator)
: EntryFrame(iterator) {}
inline ExitFrame::ExitFrame(StackFrameIteratorBase* iterator)
- : StackFrame(iterator) {}
+ : TypedFrame(iterator) {}
inline BuiltinExitFrame::BuiltinExitFrame(StackFrameIteratorBase* iterator)
: ExitFrame(iterator) {}
@@ -124,17 +130,8 @@ inline Object BuiltinExitFrame::receiver_slot_object() const {
// fp[4]: argc.
// fp[5]: hole.
// ------- JS stack arguments ------
- // fp[6]: receiver, if V8_REVERSE_JSARGS.
- // fp[2 + argc - 1]: receiver, if not V8_REVERSE_JSARGS.
-#ifdef V8_REVERSE_JSARGS
+ // fp[6]: receiver
const int receiverOffset = BuiltinExitFrameConstants::kFirstArgumentOffset;
-#else
- Object argc_slot = argc_slot_object();
- DCHECK(argc_slot.IsSmi());
- int argc = Smi::ToInt(argc_slot);
- const int receiverOffset = BuiltinExitFrameConstants::kNewTargetOffset +
- (argc - 1) * kSystemPointerSize;
-#endif
return Object(base::Memory<Address>(fp() + receiverOffset));
}
@@ -153,72 +150,67 @@ inline Object BuiltinExitFrame::new_target_slot_object() const {
fp() + BuiltinExitFrameConstants::kNewTargetOffset));
}
-inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
- : StackFrame(iterator) {
-}
+inline CommonFrame::CommonFrame(StackFrameIteratorBase* iterator)
+ : StackFrame(iterator) {}
-inline Object StandardFrame::GetExpression(int index) const {
+inline Object CommonFrame::GetExpression(int index) const {
return Object(base::Memory<Address>(GetExpressionAddress(index)));
}
-inline void StandardFrame::SetExpression(int index, Object value) {
+inline void CommonFrame::SetExpression(int index, Object value) {
base::Memory<Address>(GetExpressionAddress(index)) = value.ptr();
}
-inline Address StandardFrame::caller_fp() const {
+inline Address CommonFrame::caller_fp() const {
return base::Memory<Address>(fp() + StandardFrameConstants::kCallerFPOffset);
}
-
-inline Address StandardFrame::caller_pc() const {
+inline Address CommonFrame::caller_pc() const {
return base::Memory<Address>(ComputePCAddress(fp()));
}
-
-inline Address StandardFrame::ComputePCAddress(Address fp) {
+inline Address CommonFrame::ComputePCAddress(Address fp) {
return fp + StandardFrameConstants::kCallerPCOffset;
}
-
-inline Address StandardFrame::ComputeConstantPoolAddress(Address fp) {
+inline Address CommonFrame::ComputeConstantPoolAddress(Address fp) {
return fp + StandardFrameConstants::kConstantPoolOffset;
}
-
-inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
+inline bool CommonFrame::IsArgumentsAdaptorFrame(Address fp) {
intptr_t frame_type =
base::Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
return frame_type == StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
}
-
-inline bool StandardFrame::IsConstructFrame(Address fp) {
+inline bool CommonFrameWithJSLinkage::IsConstructFrame(Address fp) {
intptr_t frame_type =
base::Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
return frame_type == StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
}
inline JavaScriptFrame::JavaScriptFrame(StackFrameIteratorBase* iterator)
- : StandardFrame(iterator) {}
+ : CommonFrameWithJSLinkage(iterator) {}
-Address JavaScriptFrame::GetParameterSlot(int index) const {
+Address CommonFrameWithJSLinkage::GetParameterSlot(int index) const {
DCHECK_LE(-1, index);
#ifdef V8_NO_ARGUMENTS_ADAPTOR
DCHECK_LT(index,
- std::max(GetActualArgumentsCount(), ComputeParametersCount()));
+ std::max(GetActualArgumentCount(), ComputeParametersCount()));
#else
DCHECK(index < ComputeParametersCount() ||
ComputeParametersCount() == kDontAdaptArgumentsSentinel);
#endif
-#ifdef V8_REVERSE_JSARGS
int parameter_offset = (index + 1) * kSystemPointerSize;
-#else
- int param_count = ComputeParametersCount();
- int parameter_offset = (param_count - index - 1) * kSystemPointerSize;
-#endif
return caller_sp() + parameter_offset;
}
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+inline int CommonFrameWithJSLinkage::GetActualArgumentCount() const {
+ return 0;
+}
+#endif
+
inline void JavaScriptFrame::set_receiver(Object value) {
base::Memory<Address>(GetParameterSlot(-1)) = value.ptr();
}
@@ -233,15 +225,12 @@ inline Object JavaScriptFrame::function_slot_object() const {
}
inline StubFrame::StubFrame(StackFrameIteratorBase* iterator)
- : StandardFrame(iterator) {
-}
-
+ : TypedFrame(iterator) {}
inline OptimizedFrame::OptimizedFrame(StackFrameIteratorBase* iterator)
: JavaScriptFrame(iterator) {
}
-
inline InterpretedFrame::InterpretedFrame(StackFrameIteratorBase* iterator)
: JavaScriptFrame(iterator) {}
@@ -251,17 +240,17 @@ inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
}
inline BuiltinFrame::BuiltinFrame(StackFrameIteratorBase* iterator)
- : JavaScriptFrame(iterator) {}
+ : TypedFrameWithJSLinkage(iterator) {}
inline WasmFrame::WasmFrame(StackFrameIteratorBase* iterator)
- : StandardFrame(iterator) {}
+ : TypedFrame(iterator) {}
inline WasmExitFrame::WasmExitFrame(StackFrameIteratorBase* iterator)
: WasmFrame(iterator) {}
inline WasmDebugBreakFrame::WasmDebugBreakFrame(
StackFrameIteratorBase* iterator)
- : StandardFrame(iterator) {}
+ : TypedFrame(iterator) {}
inline WasmToJsFrame::WasmToJsFrame(StackFrameIteratorBase* iterator)
: StubFrame(iterator) {}
@@ -274,11 +263,10 @@ inline CWasmEntryFrame::CWasmEntryFrame(StackFrameIteratorBase* iterator)
inline WasmCompileLazyFrame::WasmCompileLazyFrame(
StackFrameIteratorBase* iterator)
- : StandardFrame(iterator) {}
+ : TypedFrame(iterator) {}
inline InternalFrame::InternalFrame(StackFrameIteratorBase* iterator)
- : StandardFrame(iterator) {
-}
+ : TypedFrame(iterator) {}
inline ConstructFrame::ConstructFrame(StackFrameIteratorBase* iterator)
: InternalFrame(iterator) {
@@ -290,7 +278,7 @@ inline BuiltinContinuationFrame::BuiltinContinuationFrame(
inline JavaScriptBuiltinContinuationFrame::JavaScriptBuiltinContinuationFrame(
StackFrameIteratorBase* iterator)
- : JavaScriptFrame(iterator) {}
+ : TypedFrameWithJSLinkage(iterator) {}
inline JavaScriptBuiltinContinuationWithCatchFrame::
JavaScriptBuiltinContinuationWithCatchFrame(
@@ -319,11 +307,11 @@ inline JavaScriptFrame* JavaScriptFrameIterator::frame() const {
return static_cast<JavaScriptFrame*>(frame);
}
-inline StandardFrame* StackTraceFrameIterator::frame() const {
+inline CommonFrame* StackTraceFrameIterator::frame() const {
StackFrame* frame = iterator_.frame();
DCHECK(frame->is_java_script() || frame->is_arguments_adaptor() ||
frame->is_wasm());
- return static_cast<StandardFrame*>(frame);
+ return static_cast<CommonFrame*>(frame);
}
bool StackTraceFrameIterator::is_javascript() const {
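With the V8_REVERSE_JSARGS layout now the only one, GetParameterSlot() reduces to caller_sp() + (index + 1) * kSystemPointerSize, where index -1 denotes the receiver. A standalone sketch of the resulting offsets, not part of the patch, assuming 8-byte system pointers:

#include <cstdio>

int main() {
  const int kSystemPointerSize = 8;  // assumed 64-bit target
  for (int index = -1; index < 3; ++index) {
    int parameter_offset = (index + 1) * kSystemPointerSize;
    std::printf("parameter %2d -> caller_sp + %2d\n", index, parameter_offset);
  }
  // The receiver (-1) sits directly above the return address at caller_sp + 0,
  // followed by parameters 0, 1, 2 at +8, +16, +24.
  return 0;
}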
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index d7aa13c3ec..3288f53c8d 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -156,6 +156,13 @@ StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
// -------------------------------------------------------------------------
+void TypedFrameWithJSLinkage::Iterate(RootVisitor* v) const {
+ IterateExpressions(v);
+ IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
+}
+
+// -------------------------------------------------------------------------
+
void JavaScriptFrameIterator::Advance() {
do {
iterator_.Advance();
@@ -316,7 +323,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
state.fp = fp;
state.sp = sp;
state.pc_address = StackFrame::ResolveReturnAddressLocation(
- reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp)));
+ reinterpret_cast<Address*>(CommonFrame::ComputePCAddress(fp)));
// If the current PC is in a bytecode handler, the top stack frame isn't
// the bytecode handler's frame and the top of stack or link register is a
@@ -589,8 +596,9 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return OPTIMIZED;
}
return BUILTIN;
- case CodeKind::OPTIMIZED_FUNCTION:
+ case CodeKind::TURBOFAN:
case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
+ case CodeKind::TURBOPROP:
return OPTIMIZED;
case CodeKind::JS_TO_WASM_FUNCTION:
return JS_TO_WASM;
@@ -598,9 +606,10 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return STUB;
case CodeKind::C_WASM_ENTRY:
return C_WASM_ENTRY;
+ case CodeKind::WASM_TO_JS_FUNCTION:
+ return WASM_TO_JS;
case CodeKind::WASM_FUNCTION:
case CodeKind::WASM_TO_CAPI_FUNCTION:
- case CodeKind::WASM_TO_JS_FUNCTION:
// Never appear as on-heap {Code} objects.
UNREACHABLE();
default:
@@ -655,9 +664,9 @@ StackFrame::Type StackFrame::GetCallerState(State* state) const {
return ComputeType(iterator_, state);
}
-Address StackFrame::UnpaddedFP() const { return fp(); }
-
-Code NativeFrame::unchecked_code() const { return Code(); }
+Address CommonFrame::GetCallerStackPointer() const {
+ return fp() + CommonFrameConstants::kCallerSPOffset;
+}
void NativeFrame::ComputeCallerState(State* state) const {
state->sp = caller_sp();
@@ -692,8 +701,6 @@ Code ConstructEntryFrame::unchecked_code() const {
return isolate()->heap()->builtin(Builtins::kJSConstructEntry);
}
-Code ExitFrame::unchecked_code() const { return Code(); }
-
void ExitFrame::ComputeCallerState(State* state) const {
// Set up the caller state.
state->sp = caller_sp();
@@ -713,10 +720,6 @@ void ExitFrame::Iterate(RootVisitor* v) const {
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
-Address ExitFrame::GetCallerStackPointer() const {
- return fp() + ExitFrameConstants::kCallerSPOffset;
-}
-
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
StackFrame::Type type = ComputeFrameType(fp);
@@ -854,7 +857,7 @@ void BuiltinExitFrame::Print(StringStream* accumulator, PrintMode mode,
accumulator->Add(")\n\n");
}
-Address StandardFrame::GetExpressionAddress(int n) const {
+Address CommonFrame::GetExpressionAddress(int n) const {
const int offset = StandardFrameConstants::kExpressionsOffset;
return fp() + offset - n * kSystemPointerSize;
}
@@ -864,27 +867,17 @@ Address InterpretedFrame::GetExpressionAddress(int n) const {
return fp() + offset - n * kSystemPointerSize;
}
-Script StandardFrame::script() const {
- // This should only be called on frames which override this method.
- UNREACHABLE();
- return Script();
-}
-
-Object StandardFrame::receiver() const {
- return ReadOnlyRoots(isolate()).undefined_value();
-}
-
-Object StandardFrame::context() const {
+Object CommonFrame::context() const {
return ReadOnlyRoots(isolate()).undefined_value();
}
-int StandardFrame::position() const {
+int CommonFrame::position() const {
AbstractCode code = AbstractCode::cast(LookupCode());
int code_offset = static_cast<int>(pc() - code.InstructionStart());
return code.SourcePosition(code_offset);
}
-int StandardFrame::ComputeExpressionsCount() const {
+int CommonFrame::ComputeExpressionsCount() const {
Address base = GetExpressionAddress(0);
Address limit = sp() - kSystemPointerSize;
DCHECK(base >= limit); // stack grows downwards
@@ -892,14 +885,7 @@ int StandardFrame::ComputeExpressionsCount() const {
return static_cast<int>((base - limit) / kSystemPointerSize);
}
-Object StandardFrame::GetParameter(int index) const {
- // StandardFrame does not define any parameters.
- UNREACHABLE();
-}
-
-int StandardFrame::ComputeParametersCount() const { return 0; }
-
-void StandardFrame::ComputeCallerState(State* state) const {
+void CommonFrame::ComputeCallerState(State* state) const {
state->sp = caller_sp();
state->fp = caller_fp();
state->pc_address = ResolveReturnAddressLocation(
@@ -910,14 +896,12 @@ void StandardFrame::ComputeCallerState(State* state) const {
reinterpret_cast<Address*>(ComputeConstantPoolAddress(fp()));
}
-bool StandardFrame::IsConstructor() const { return false; }
-
-void StandardFrame::Summarize(std::vector<FrameSummary>* functions) const {
+void CommonFrame::Summarize(std::vector<FrameSummary>* functions) const {
// This should only be called on frames which override this method.
UNREACHABLE();
}
-void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
+void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
// Make sure that we're not doing "safe" stack frame iteration. We cannot
// possibly find pointers in optimized frames in that state.
DCHECK(can_access_heap_objects());
@@ -1080,16 +1064,10 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
frame_header_limit);
}
-void StubFrame::Iterate(RootVisitor* v) const { IterateCompiledFrame(v); }
-
Code StubFrame::unchecked_code() const {
return isolate()->FindCodeObject(pc());
}
-Address StubFrame::GetCallerStackPointer() const {
- return fp() + ExitFrameConstants::kCallerSPOffset;
-}
-
int StubFrame::LookupExceptionHandlerInTable() {
Code code = LookupCode();
DCHECK(code.is_turbofanned());
@@ -1120,7 +1098,9 @@ bool JavaScriptFrame::HasInlinedFrames() const {
return functions.size() > 1;
}
-Code JavaScriptFrame::unchecked_code() const { return function().code(); }
+Code CommonFrameWithJSLinkage::unchecked_code() const {
+ return function().code();
+}
int OptimizedFrame::ComputeParametersCount() const {
Code code = LookupCode();
@@ -1153,7 +1133,12 @@ void JavaScriptFrame::GetFunctions(
}
}
-void JavaScriptFrame::Summarize(std::vector<FrameSummary>* functions) const {
+bool CommonFrameWithJSLinkage::IsConstructor() const {
+ return IsConstructFrame(caller_fp());
+}
+
+void CommonFrameWithJSLinkage::Summarize(
+ std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
Code code = LookupCode();
int offset = static_cast<int>(pc() - code.InstructionStart());
@@ -1178,7 +1163,7 @@ Object JavaScriptFrame::unchecked_function() const {
return function_slot_object();
}
-Object JavaScriptFrame::receiver() const { return GetParameter(-1); }
+Object CommonFrameWithJSLinkage::receiver() const { return GetParameter(-1); }
Object JavaScriptFrame::context() const {
const int offset = StandardFrameConstants::kContextOffset;
@@ -1191,7 +1176,7 @@ Script JavaScriptFrame::script() const {
return Script::cast(function().shared().script());
}
-int JavaScriptFrame::LookupExceptionHandlerInTable(
+int CommonFrameWithJSLinkage::LookupExceptionHandlerInTable(
int* stack_depth, HandlerTable::CatchPrediction* prediction) {
DCHECK(!LookupCode().has_handler_table());
DCHECK(!LookupCode().is_optimized_code());
@@ -1286,11 +1271,11 @@ void JavaScriptFrame::CollectFunctionAndOffsetForICStats(JSFunction function,
}
}
-Object JavaScriptFrame::GetParameter(int index) const {
+Object CommonFrameWithJSLinkage::GetParameter(int index) const {
return Object(Memory<Address>(GetParameterSlot(index)));
}
-int JavaScriptFrame::ComputeParametersCount() const {
+int CommonFrameWithJSLinkage::ComputeParametersCount() const {
DCHECK(can_access_heap_objects() &&
isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
return function().shared().internal_formal_parameter_count();
@@ -1303,7 +1288,7 @@ int JavaScriptFrame::GetActualArgumentCount() const {
}
#endif
-Handle<FixedArray> JavaScriptFrame::GetParameters() const {
+Handle<FixedArray> CommonFrameWithJSLinkage::GetParameters() const {
if (V8_LIKELY(!FLAG_detailed_error_stack_trace)) {
return isolate()->factory()->empty_fixed_array();
}
@@ -1317,6 +1302,11 @@ Handle<FixedArray> JavaScriptFrame::GetParameters() const {
return parameters;
}
+JSFunction JavaScriptBuiltinContinuationFrame::function() const {
+ const int offset = BuiltinContinuationFrameConstants::kFunctionOffset;
+ return JSFunction::cast(Object(base::Memory<Address>(fp() + offset)));
+}
+
int JavaScriptBuiltinContinuationFrame::ComputeParametersCount() const {
// Assert that the first allocatable register is also the argument count
// register.
@@ -1341,16 +1331,10 @@ Object JavaScriptBuiltinContinuationFrame::context() const {
void JavaScriptBuiltinContinuationWithCatchFrame::SetException(
Object exception) {
-#ifdef V8_REVERSE_JSARGS
int argc = ComputeParametersCount();
Address exception_argument_slot =
fp() + BuiltinContinuationFrameConstants::kFixedFrameSizeAboveFp +
(argc - 1) * kSystemPointerSize;
-#else
- Address exception_argument_slot =
- fp() + BuiltinContinuationFrameConstants::kFixedFrameSizeAboveFp +
- kSystemPointerSize; // Skip over return value slot.
-#endif
// Only allow setting exception if previous value was the hole.
CHECK_EQ(ReadOnlyRoots(isolate()).the_hole_value(),
@@ -1476,25 +1460,25 @@ FrameSummary::~FrameSummary() {
#undef FRAME_SUMMARY_DESTR
}
-FrameSummary FrameSummary::GetTop(const StandardFrame* frame) {
+FrameSummary FrameSummary::GetTop(const CommonFrame* frame) {
std::vector<FrameSummary> frames;
frame->Summarize(&frames);
DCHECK_LT(0, frames.size());
return frames.back();
}
-FrameSummary FrameSummary::GetBottom(const StandardFrame* frame) {
+FrameSummary FrameSummary::GetBottom(const CommonFrame* frame) {
return Get(frame, 0);
}
-FrameSummary FrameSummary::GetSingle(const StandardFrame* frame) {
+FrameSummary FrameSummary::GetSingle(const CommonFrame* frame) {
std::vector<FrameSummary> frames;
frame->Summarize(&frames);
DCHECK_EQ(1, frames.size());
return frames.front();
}
-FrameSummary FrameSummary::Get(const StandardFrame* frame, int index) {
+FrameSummary FrameSummary::Get(const CommonFrame* frame, int index) {
DCHECK_LE(0, index);
std::vector<FrameSummary> frames;
frame->Summarize(&frames);
@@ -1652,23 +1636,6 @@ DeoptimizationData OptimizedFrame::GetDeoptimizationData(
return DeoptimizationData();
}
-#ifndef V8_REVERSE_JSARGS
-Object OptimizedFrame::receiver() const {
- Code code = LookupCode();
- if (code.kind() == CodeKind::BUILTIN) {
- intptr_t argc = static_cast<int>(
- Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset));
- intptr_t args_size =
- (StandardFrameConstants::kFixedSlotCountAboveFp + argc) *
- kSystemPointerSize;
- Address receiver_ptr = fp() + args_size;
- return *FullObjectSlot(receiver_ptr);
- } else {
- return JavaScriptFrame::receiver();
- }
-}
-#endif
-
void OptimizedFrame::GetFunctions(
std::vector<SharedFunctionInfo>* functions) const {
DCHECK(functions->empty());
@@ -1823,21 +1790,16 @@ Code ArgumentsAdaptorFrame::unchecked_code() const {
return isolate()->builtins()->builtin(Builtins::kArgumentsAdaptorTrampoline);
}
+JSFunction BuiltinFrame::function() const {
+ const int offset = BuiltinFrameConstants::kFunctionOffset;
+ return JSFunction::cast(Object(base::Memory<Address>(fp() + offset)));
+}
+
int BuiltinFrame::ComputeParametersCount() const {
const int offset = BuiltinFrameConstants::kLengthOffset;
return Smi::ToInt(Object(base::Memory<Address>(fp() + offset)));
}
-void BuiltinFrame::PrintFrameKind(StringStream* accumulator) const {
- accumulator->Add("builtin frame: ");
-}
-
-Address InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
Code InternalFrame::unchecked_code() const { return Code(); }
void WasmFrame::Print(StringStream* accumulator, PrintMode mode,
@@ -1872,12 +1834,6 @@ Code WasmFrame::unchecked_code() const {
return isolate()->FindCodeObject(pc());
}
-void WasmFrame::Iterate(RootVisitor* v) const { IterateCompiledFrame(v); }
-
-Address WasmFrame::GetCallerStackPointer() const {
- return fp() + ExitFrameConstants::kCallerSPOffset;
-}
-
wasm::WasmCode* WasmFrame::wasm_code() const {
return isolate()->wasm_engine()->code_manager()->LookupCode(pc());
}
@@ -1967,8 +1923,6 @@ void WasmDebugBreakFrame::Iterate(RootVisitor* v) const {
// Liftoff.
}
-Code WasmDebugBreakFrame::unchecked_code() const { return Code(); }
-
void WasmDebugBreakFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
PrintIndex(accumulator, mode, index);
@@ -1976,12 +1930,6 @@ void WasmDebugBreakFrame::Print(StringStream* accumulator, PrintMode mode,
if (mode != OVERVIEW) accumulator->Add("\n");
}
-Address WasmDebugBreakFrame::GetCallerStackPointer() const {
- // WasmDebugBreak does not receive any arguments, hence the stack pointer of
- // the caller is at a fixed offset from the frame pointer.
- return fp() + WasmDebugBreakFrameConstants::kCallerSPOffset;
-}
-
void JsToWasmFrame::Iterate(RootVisitor* v) const {
Code code = GetContainingCode(isolate(), pc());
// GenericJSToWasmWrapper stack layout
@@ -2016,8 +1964,6 @@ void JsToWasmFrame::Iterate(RootVisitor* v) const {
v->VisitRootPointers(Root::kTop, nullptr, spill_slot_base, spill_slot_limit);
}
-Code WasmCompileLazyFrame::unchecked_code() const { return Code(); }
-
WasmInstanceObject WasmCompileLazyFrame::wasm_instance() const {
return WasmInstanceObject::cast(*wasm_instance_slot());
}
@@ -2035,10 +1981,6 @@ void WasmCompileLazyFrame::Iterate(RootVisitor* v) const {
v->VisitRootPointer(Root::kTop, nullptr, wasm_instance_slot());
}
-Address WasmCompileLazyFrame::GetCallerStackPointer() const {
- return fp() + WasmCompileLazyFrameConstants::kCallerSPOffset;
-}
-
namespace {
void PrintFunctionSource(StringStream* accumulator, SharedFunctionInfo shared,
@@ -2201,7 +2143,7 @@ void EntryFrame::Iterate(RootVisitor* v) const {
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
-void StandardFrame::IterateExpressions(RootVisitor* v) const {
+void CommonFrame::IterateExpressions(RootVisitor* v) const {
const int last_object_offset = StandardFrameConstants::kLastObjectOffset;
intptr_t marker =
Memory<intptr_t>(fp() + CommonFrameConstants::kContextOrFrameTypeOffset);
@@ -2304,7 +2246,7 @@ bool BuiltinContinuationModeIsWithCatch(BuiltinContinuationMode mode) {
InterpretedFrameInfo::InterpretedFrameInfo(int parameters_count_with_receiver,
int translation_height,
- bool is_topmost,
+ bool is_topmost, bool pad_arguments,
FrameInfoKind frame_info_kind) {
const int locals_count = translation_height;
@@ -2325,7 +2267,7 @@ InterpretedFrameInfo::InterpretedFrameInfo(int parameters_count_with_receiver,
// the part described by InterpreterFrameConstants. This will include
// argument padding, when needed.
const int parameter_padding_slots =
- ArgumentPaddingSlots(parameters_count_with_receiver);
+ pad_arguments ? ArgumentPaddingSlots(parameters_count_with_receiver) : 0;
const int fixed_frame_size =
InterpreterFrameConstants::kFixedFrameSize +
(parameters_count_with_receiver + parameter_padding_slots) *
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index eb627a158a..43f9d383c2 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -11,6 +11,36 @@
#include "src/objects/code.h"
#include "src/objects/objects.h"
+//
+// Frame inheritance hierarchy (please keep in sync with frame-constants.h):
+// - CommonFrame
+//   - CommonFrameWithJSLinkage
+//     - JavaScriptFrame (aka StandardFrame)
+//       - InterpretedFrame
+//       - OptimizedFrame
+//       - ArgumentsAdaptorFrame (technically a TypedFrame)
+//     - TypedFrameWithJSLinkage
+//       - BuiltinFrame
+//       - JavaScriptBuiltinContinuationFrame
+//         - JavaScriptBuiltinContinuationWithCatchFrame
+//   - TypedFrame
+//     - NativeFrame
+//     - EntryFrame
+//       - ConstructEntryFrame
+//     - ExitFrame
+//       - BuiltinExitFrame
+//     - StubFrame
+//       - JsToWasmFrame
+//       - CWasmEntryFrame
+//     - InternalFrame
+//       - ConstructFrame
+//       - BuiltinContinuationFrame
+//     - WasmFrame
+//       - WasmExitFrame
+//     - WasmDebugBreakFrame
+//     - WasmCompileLazyFrame
+//
+
namespace v8 {
namespace internal {
namespace wasm {
@@ -196,13 +226,10 @@ class StackFrame {
}
bool is_construct() const { return type() == CONSTRUCT; }
bool is_builtin_exit() const { return type() == BUILTIN_EXIT; }
- virtual bool is_standard() const { return false; }
bool is_java_script() const {
Type type = this->type();
- return (type == OPTIMIZED) || (type == INTERPRETED) || (type == BUILTIN) ||
- (type == JAVA_SCRIPT_BUILTIN_CONTINUATION) ||
- (type == JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH);
+ return (type == OPTIMIZED) || (type == INTERPRETED);
}
bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
@@ -213,12 +240,6 @@ class StackFrame {
Address callee_fp() const { return state_.callee_fp; }
inline Address callee_pc() const;
Address caller_sp() const { return GetCallerStackPointer(); }
-
- // If this frame is optimized and was dynamically aligned return its old
- // unaligned frame pointer. When the frame is deoptimized its FP will shift
- // up one word and become unaligned.
- Address UnpaddedFP() const;
-
inline Address pc() const;
// Skip authentication of the PC, when using CFI. Used in the profiler, where
@@ -312,145 +333,7 @@ class StackFrame {
friend class SafeStackFrameIterator;
};
-class NativeFrame : public StackFrame {
- public:
- Type type() const override { return NATIVE; }
-
- Code unchecked_code() const override;
-
- // Garbage collection support.
- void Iterate(RootVisitor* v) const override {}
-
- protected:
- inline explicit NativeFrame(StackFrameIteratorBase* iterator);
-
- Address GetCallerStackPointer() const override;
-
- private:
- void ComputeCallerState(State* state) const override;
-
- friend class StackFrameIteratorBase;
-};
-
-// Entry frames are used to enter JavaScript execution from C.
-class EntryFrame : public StackFrame {
- public:
- Type type() const override { return ENTRY; }
-
- Code unchecked_code() const override;
-
- // Garbage collection support.
- void Iterate(RootVisitor* v) const override;
-
- static EntryFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_entry());
- return static_cast<EntryFrame*>(frame);
- }
-
- protected:
- inline explicit EntryFrame(StackFrameIteratorBase* iterator);
-
- // The caller stack pointer for entry frames is always zero. The
- // real information about the caller frame is available through the
- // link to the top exit frame.
- Address GetCallerStackPointer() const override { return 0; }
-
- private:
- void ComputeCallerState(State* state) const override;
- Type GetCallerState(State* state) const override;
-
- friend class StackFrameIteratorBase;
-};
-
-class ConstructEntryFrame : public EntryFrame {
- public:
- Type type() const override { return CONSTRUCT_ENTRY; }
-
- Code unchecked_code() const override;
-
- static ConstructEntryFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_construct_entry());
- return static_cast<ConstructEntryFrame*>(frame);
- }
-
- protected:
- inline explicit ConstructEntryFrame(StackFrameIteratorBase* iterator);
-
- private:
- friend class StackFrameIteratorBase;
-};
-
-// Exit frames are used to exit JavaScript execution and go to C.
-class ExitFrame : public StackFrame {
- public:
- Type type() const override { return EXIT; }
-
- Code unchecked_code() const override;
-
- // Garbage collection support.
- void Iterate(RootVisitor* v) const override;
-
- static ExitFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_exit());
- return static_cast<ExitFrame*>(frame);
- }
-
- // Compute the state and type of an exit frame given a frame
- // pointer. Used when constructing the first stack frame seen by an
- // iterator and the frames following entry frames.
- static Type GetStateForFramePointer(Address fp, State* state);
- static Address ComputeStackPointer(Address fp);
- static StackFrame::Type ComputeFrameType(Address fp);
- static void FillState(Address fp, Address sp, State* state);
-
- protected:
- inline explicit ExitFrame(StackFrameIteratorBase* iterator);
-
- Address GetCallerStackPointer() const override;
-
- private:
- void ComputeCallerState(State* state) const override;
-
- friend class StackFrameIteratorBase;
-};
-
-// Builtin exit frames are a special case of exit frames, which are used
-// whenever C++ builtins (e.g., Math.acos) are called. Their main purpose is
-// to allow such builtins to appear in stack traces.
-class BuiltinExitFrame : public ExitFrame {
- public:
- Type type() const override { return BUILTIN_EXIT; }
-
- static BuiltinExitFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_builtin_exit());
- return static_cast<BuiltinExitFrame*>(frame);
- }
-
- JSFunction function() const;
- Object receiver() const;
-
- bool IsConstructor() const;
-
- void Print(StringStream* accumulator, PrintMode mode,
- int index) const override;
-
- protected:
- inline explicit BuiltinExitFrame(StackFrameIteratorBase* iterator);
-
- private:
- Object GetParameter(int i) const;
- int ComputeParametersCount() const;
-
- inline Object receiver_slot_object() const;
- inline Object argc_slot_object() const;
- inline Object target_slot_object() const;
- inline Object new_target_slot_object() const;
-
- friend class StackFrameIteratorBase;
- friend class FrameArrayBuilder;
-};
-
-class StandardFrame;
+class CommonFrame;
class V8_EXPORT_PRIVATE FrameSummary {
public:
@@ -541,10 +424,10 @@ class V8_EXPORT_PRIVATE FrameSummary {
~FrameSummary();
- static FrameSummary GetTop(const StandardFrame* frame);
- static FrameSummary GetBottom(const StandardFrame* frame);
- static FrameSummary GetSingle(const StandardFrame* frame);
- static FrameSummary Get(const StandardFrame* frame, int index);
+ static FrameSummary GetTop(const CommonFrame* frame);
+ static FrameSummary GetBottom(const CommonFrame* frame);
+ static FrameSummary GetSingle(const CommonFrame* frame);
+ static FrameSummary Get(const CommonFrame* frame, int index);
void EnsureSourcePositionsAvailable();
bool AreSourcePositionsAvailable() const;
@@ -578,15 +461,11 @@ class V8_EXPORT_PRIVATE FrameSummary {
#undef FRAME_SUMMARY_FIELD
};
-class StandardFrame : public StackFrame {
+class CommonFrame : public StackFrame {
public:
- // Testers.
- bool is_standard() const override { return true; }
-
// Accessors.
- virtual Object receiver() const;
- virtual Script script() const;
- virtual Object context() const;
+ virtual Object context()
+ const; // TODO(victorgomes): CommonFrames don't have context.
virtual int position() const;
// Access the expressions in the stack frame including locals.
@@ -594,25 +473,20 @@ class StandardFrame : public StackFrame {
inline void SetExpression(int index, Object value);
int ComputeExpressionsCount() const;
- // Access the parameters.
- virtual Object GetParameter(int index) const;
- virtual int ComputeParametersCount() const;
-
- // Check if this frame is a constructor frame invoked through 'new'.
- virtual bool IsConstructor() const;
+ Address GetCallerStackPointer() const override;
// Build a list with summaries for this frame including all inlined frames.
// The functions are ordered bottom-to-top (i.e. summaries.last() is the
// top-most activation; caller comes before callee).
virtual void Summarize(std::vector<FrameSummary>* frames) const;
- static StandardFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_standard());
- return static_cast<StandardFrame*>(frame);
+ static CommonFrame* cast(StackFrame* frame) {
+ // It is always safe to cast to common.
+ return static_cast<CommonFrame*>(frame);
}
protected:
- inline explicit StandardFrame(StackFrameIteratorBase* iterator);
+ inline explicit CommonFrame(StackFrameIteratorBase* iterator);
void ComputeCallerState(State* state) const override;
@@ -639,10 +513,6 @@ class StandardFrame : public StackFrame {
// an arguments adaptor frame.
static inline bool IsArgumentsAdaptorFrame(Address fp);
- // Determines if the standard frame for the given frame pointer is a
- // construct frame.
- static inline bool IsConstructFrame(Address fp);
-
// Used by OptimizedFrames and StubFrames.
void IterateCompiledFrame(RootVisitor* v) const;
@@ -651,29 +521,77 @@ class StandardFrame : public StackFrame {
friend class SafeStackFrameIterator;
};
-class JavaScriptFrame : public StandardFrame {
+class TypedFrame : public CommonFrame {
public:
- Type type() const override = 0;
+ Code unchecked_code() const override { return Code(); }
+ void Iterate(RootVisitor* v) const override { IterateCompiledFrame(v); }
+
+ protected:
+ inline explicit TypedFrame(StackFrameIteratorBase* iterator);
+};
+class CommonFrameWithJSLinkage : public CommonFrame {
+ public:
+ // Accessors.
+ virtual JSFunction function() const = 0;
+
+ // Access the parameters.
+ virtual Object receiver() const;
+ virtual Object GetParameter(int index) const;
+ virtual int ComputeParametersCount() const;
+ Handle<FixedArray> GetParameters() const;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ virtual int GetActualArgumentCount() const;
+#endif
+
+ // Determine the code for the frame.
+ Code unchecked_code() const override;
+
+ // Lookup exception handler for current {pc}, returns -1 if none found. Also
+ // returns data associated with the handler site specific to the frame type:
+ // - OptimizedFrame : Data is not used and will not return a value.
+ // - InterpretedFrame: Data is the register index holding the context.
+ virtual int LookupExceptionHandlerInTable(
+ int* data, HandlerTable::CatchPrediction* prediction);
+
+ // Check if this frame is a constructor frame invoked through 'new'.
+ virtual bool IsConstructor() const;
+
+ // Summarize Frame
void Summarize(std::vector<FrameSummary>* frames) const override;
+ protected:
+ inline explicit CommonFrameWithJSLinkage(StackFrameIteratorBase* iterator);
+
+ // Determines if the standard frame for the given frame pointer is a
+ // construct frame.
+ static inline bool IsConstructFrame(Address fp);
+ inline Address GetParameterSlot(int index) const;
+};
+
+class TypedFrameWithJSLinkage : public CommonFrameWithJSLinkage {
+ public:
+ void Iterate(RootVisitor* v) const override;
+
+ protected:
+ inline explicit TypedFrameWithJSLinkage(StackFrameIteratorBase* iterator);
+};
+
+class JavaScriptFrame : public CommonFrameWithJSLinkage {
+ public:
+ Type type() const override = 0;
+
// Accessors.
- virtual JSFunction function() const;
+ JSFunction function() const override;
Object unchecked_function() const;
- Object receiver() const override;
+ Script script() const;
Object context() const override;
- Script script() const override;
-
- inline void set_receiver(Object value);
- // Access the parameters.
- inline Address GetParameterSlot(int index) const;
- Object GetParameter(int index) const override;
- int ComputeParametersCount() const override;
#ifdef V8_NO_ARGUMENTS_ADAPTOR
- int GetActualArgumentCount() const;
+ int GetActualArgumentCount() const override;
#endif
- Handle<FixedArray> GetParameters() const;
+
+ inline void set_receiver(Object value);
// Debugger access.
void SetParameterValue(int index, Object value) const;
@@ -697,21 +615,11 @@ class JavaScriptFrame : public StandardFrame {
void Print(StringStream* accumulator, PrintMode mode,
int index) const override;
- // Determine the code for the frame.
- Code unchecked_code() const override;
-
// Return a list with {SharedFunctionInfo} objects of this frame.
virtual void GetFunctions(std::vector<SharedFunctionInfo>* functions) const;
void GetFunctions(std::vector<Handle<SharedFunctionInfo>>* functions) const;
- // Lookup exception handler for current {pc}, returns -1 if none found. Also
- // returns data associated with the handler site specific to the frame type:
- // - OptimizedFrame : Data is not used and will not return a value.
- // - InterpretedFrame: Data is the register index holding the context.
- virtual int LookupExceptionHandlerInTable(
- int* data, HandlerTable::CatchPrediction* prediction);
-
// Architecture-specific register description.
static Register fp_register();
static Register context_register();
@@ -746,13 +654,139 @@ class JavaScriptFrame : public StandardFrame {
friend class StackFrameIteratorBase;
};
-class StubFrame : public StandardFrame {
+class NativeFrame : public TypedFrame {
public:
- Type type() const override { return STUB; }
+ Type type() const override { return NATIVE; }
- // GC support.
+ // Garbage collection support.
+ void Iterate(RootVisitor* v) const override {}
+
+ protected:
+ inline explicit NativeFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ void ComputeCallerState(State* state) const override;
+
+ friend class StackFrameIteratorBase;
+};
+
+// Entry frames are used to enter JavaScript execution from C.
+class EntryFrame : public TypedFrame {
+ public:
+ Type type() const override { return ENTRY; }
+
+ Code unchecked_code() const override;
+
+ // Garbage collection support.
void Iterate(RootVisitor* v) const override;
+ static EntryFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_entry());
+ return static_cast<EntryFrame*>(frame);
+ }
+
+ protected:
+ inline explicit EntryFrame(StackFrameIteratorBase* iterator);
+
+ // The caller stack pointer for entry frames is always zero. The
+ // real information about the caller frame is available through the
+ // link to the top exit frame.
+ Address GetCallerStackPointer() const override { return 0; }
+
+ private:
+ void ComputeCallerState(State* state) const override;
+ Type GetCallerState(State* state) const override;
+
+ friend class StackFrameIteratorBase;
+};
+
+class ConstructEntryFrame : public EntryFrame {
+ public:
+ Type type() const override { return CONSTRUCT_ENTRY; }
+
+ Code unchecked_code() const override;
+
+ static ConstructEntryFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_construct_entry());
+ return static_cast<ConstructEntryFrame*>(frame);
+ }
+
+ protected:
+ inline explicit ConstructEntryFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
+// Exit frames are used to exit JavaScript execution and go to C.
+class ExitFrame : public TypedFrame {
+ public:
+ Type type() const override { return EXIT; }
+
+ // Garbage collection support.
+ void Iterate(RootVisitor* v) const override;
+
+ static ExitFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_exit());
+ return static_cast<ExitFrame*>(frame);
+ }
+
+ // Compute the state and type of an exit frame given a frame
+ // pointer. Used when constructing the first stack frame seen by an
+ // iterator and the frames following entry frames.
+ static Type GetStateForFramePointer(Address fp, State* state);
+ static Address ComputeStackPointer(Address fp);
+ static StackFrame::Type ComputeFrameType(Address fp);
+ static void FillState(Address fp, Address sp, State* state);
+
+ protected:
+ inline explicit ExitFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ void ComputeCallerState(State* state) const override;
+
+ friend class StackFrameIteratorBase;
+};
+
+// Builtin exit frames are a special case of exit frames, which are used
+// whenever C++ builtins (e.g., Math.acos) are called. Their main purpose is
+// to allow such builtins to appear in stack traces.
+class BuiltinExitFrame : public ExitFrame {
+ public:
+ Type type() const override { return BUILTIN_EXIT; }
+
+ static BuiltinExitFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_builtin_exit());
+ return static_cast<BuiltinExitFrame*>(frame);
+ }
+
+ JSFunction function() const;
+ Object receiver() const;
+ bool IsConstructor() const;
+
+ void Print(StringStream* accumulator, PrintMode mode,
+ int index) const override;
+
+ protected:
+ inline explicit BuiltinExitFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ Object GetParameter(int i) const;
+ int ComputeParametersCount() const;
+
+ inline Object receiver_slot_object() const;
+ inline Object argc_slot_object() const;
+ inline Object target_slot_object() const;
+ inline Object new_target_slot_object() const;
+
+ friend class StackFrameIteratorBase;
+ friend class FrameArrayBuilder;
+};
+
+class StubFrame : public TypedFrame {
+ public:
+ Type type() const override { return STUB; }
+
// Determine the code for the frame.
Code unchecked_code() const override;
@@ -763,8 +797,7 @@ class StubFrame : public StandardFrame {
protected:
inline explicit StubFrame(StackFrameIteratorBase* iterator);
- Address GetCallerStackPointer() const override;
-
+ private:
friend class StackFrameIteratorBase;
};
@@ -788,11 +821,6 @@ class OptimizedFrame : public JavaScriptFrame {
DeoptimizationData GetDeoptimizationData(int* deopt_index) const;
-#ifndef V8_REVERSE_JSARGS
- // When the arguments are reversed in the stack, receiver() is
- // inherited from JavaScriptFrame.
- Object receiver() const override;
-#endif
int ComputeParametersCount() const override;
static int StackSlotOffsetRelativeToFp(int slot_index);
@@ -857,6 +885,10 @@ class InterpretedFrame : public JavaScriptFrame {
// Arguments adaptor frames are automatically inserted below
// JavaScript frames when the actual number of parameters does not
// match the formal number of parameters.
+// NOTE: this inheritance is wrong; an ArgumentsAdaptorFrame should be a
+// TypedFrame, but due to FrameInspector::javascript_frame() it needs to be
+// seen as a JavaScriptFrame. This frame will, however, be deleted soon.
class ArgumentsAdaptorFrame : public JavaScriptFrame {
public:
Type type() const override { return ARGUMENTS_ADAPTOR; }
@@ -884,7 +916,7 @@ class ArgumentsAdaptorFrame : public JavaScriptFrame {
// Builtin frames are built for builtins with JavaScript linkage, such as
// various standard library functions (i.e. Math.asin, Math.floor, etc.).
-class BuiltinFrame final : public JavaScriptFrame {
+class BuiltinFrame final : public TypedFrameWithJSLinkage {
public:
Type type() const final { return BUILTIN; }
@@ -892,24 +924,21 @@ class BuiltinFrame final : public JavaScriptFrame {
DCHECK(frame->is_builtin());
return static_cast<BuiltinFrame*>(frame);
}
- int ComputeParametersCount() const final;
+
+ JSFunction function() const override;
+ int ComputeParametersCount() const override;
protected:
inline explicit BuiltinFrame(StackFrameIteratorBase* iterator);
- void PrintFrameKind(StringStream* accumulator) const override;
-
private:
friend class StackFrameIteratorBase;
};
-class WasmFrame : public StandardFrame {
+class WasmFrame : public TypedFrame {
public:
Type type() const override { return WASM; }
- // GC support.
- void Iterate(RootVisitor* v) const override;
-
// Printing support.
void Print(StringStream* accumulator, PrintMode mode,
int index) const override;
@@ -925,7 +954,7 @@ class WasmFrame : public StandardFrame {
V8_EXPORT_PRIVATE wasm::NativeModule* native_module() const;
wasm::WasmCode* wasm_code() const;
uint32_t function_index() const;
- Script script() const override;
+ Script script() const;
// Byte position in the module, or asm.js source position.
int position() const override;
Object context() const override;
@@ -944,8 +973,6 @@ class WasmFrame : public StandardFrame {
protected:
inline explicit WasmFrame(StackFrameIteratorBase* iterator);
- Address GetCallerStackPointer() const override;
-
private:
friend class StackFrameIteratorBase;
WasmModuleObject module_object() const;
@@ -963,15 +990,13 @@ class WasmExitFrame : public WasmFrame {
friend class StackFrameIteratorBase;
};
-class WasmDebugBreakFrame final : public StandardFrame {
+class WasmDebugBreakFrame final : public TypedFrame {
public:
Type type() const override { return WASM_DEBUG_BREAK; }
// GC support.
void Iterate(RootVisitor* v) const override;
- Code unchecked_code() const override;
-
void Print(StringStream* accumulator, PrintMode mode,
int index) const override;
@@ -983,8 +1008,6 @@ class WasmDebugBreakFrame final : public StandardFrame {
protected:
inline explicit WasmDebugBreakFrame(StackFrameIteratorBase*);
- Address GetCallerStackPointer() const override;
-
private:
friend class StackFrameIteratorBase;
};
@@ -1025,11 +1048,10 @@ class CWasmEntryFrame : public StubFrame {
Type GetCallerState(State* state) const override;
};
-class WasmCompileLazyFrame : public StandardFrame {
+class WasmCompileLazyFrame : public TypedFrame {
public:
Type type() const override { return WASM_COMPILE_LAZY; }
- Code unchecked_code() const override;
WasmInstanceObject wasm_instance() const;
FullObjectSlot wasm_instance_slot() const;
@@ -1044,13 +1066,11 @@ class WasmCompileLazyFrame : public StandardFrame {
protected:
inline explicit WasmCompileLazyFrame(StackFrameIteratorBase* iterator);
- Address GetCallerStackPointer() const override;
-
private:
friend class StackFrameIteratorBase;
};
-class InternalFrame : public StandardFrame {
+class InternalFrame : public TypedFrame {
public:
Type type() const override { return INTERNAL; }
@@ -1068,8 +1088,6 @@ class InternalFrame : public StandardFrame {
protected:
inline explicit InternalFrame(StackFrameIteratorBase* iterator);
- Address GetCallerStackPointer() const override;
-
private:
friend class StackFrameIteratorBase;
};
@@ -1108,7 +1126,7 @@ class BuiltinContinuationFrame : public InternalFrame {
friend class StackFrameIteratorBase;
};
-class JavaScriptBuiltinContinuationFrame : public JavaScriptFrame {
+class JavaScriptBuiltinContinuationFrame : public TypedFrameWithJSLinkage {
public:
Type type() const override { return JAVA_SCRIPT_BUILTIN_CONTINUATION; }
@@ -1117,6 +1135,7 @@ class JavaScriptBuiltinContinuationFrame : public JavaScriptFrame {
return static_cast<JavaScriptBuiltinContinuationFrame*>(frame);
}
+ JSFunction function() const override;
int ComputeParametersCount() const override;
intptr_t GetSPToFPDelta() const;
@@ -1236,7 +1255,7 @@ class V8_EXPORT_PRIVATE StackTraceFrameIterator {
void AdvanceOneFrame() { iterator_.Advance(); }
int FrameFunctionCount() const;
- inline StandardFrame* frame() const;
+ inline CommonFrame* frame() const;
inline bool is_javascript() const;
inline bool is_wasm() const;
@@ -1315,14 +1334,15 @@ enum class BuiltinContinuationMode {
class InterpretedFrameInfo {
public:
static InterpretedFrameInfo Precise(int parameters_count_with_receiver,
- int translation_height, bool is_topmost) {
+ int translation_height, bool is_topmost,
+ bool pad_arguments) {
return {parameters_count_with_receiver, translation_height, is_topmost,
- FrameInfoKind::kPrecise};
+ pad_arguments, FrameInfoKind::kPrecise};
}
static InterpretedFrameInfo Conservative(int parameters_count_with_receiver,
int locals_count) {
- return {parameters_count_with_receiver, locals_count, false,
+ return {parameters_count_with_receiver, locals_count, false, true,
FrameInfoKind::kConservative};
}
@@ -1337,7 +1357,7 @@ class InterpretedFrameInfo {
private:
InterpretedFrameInfo(int parameters_count_with_receiver,
int translation_height, bool is_topmost,
- FrameInfoKind frame_info_kind);
+ bool pad_arguments, FrameInfoKind frame_info_kind);
uint32_t register_stack_slot_count_;
uint32_t frame_size_in_bytes_without_fixed_;
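The hunks above move several frame classes off StandardFrame/JavaScriptFrame and onto TypedFrame, TypedFrameWithJSLinkage and CommonFrame, and iterators such as StackTraceFrameIterator::frame() now hand out CommonFrame*. The toy program below mirrors only the shape of that split; the stand-in structs, the FrameType enum and main() are inventions for illustration, not the real frames.h declarations.

// Illustrative sketch: simplified stand-ins, not the real V8 frame classes.
#include <iostream>

enum class FrameType { WASM, BUILTIN };

// Minimal stand-in for CommonFrame: what frame iterators hand out.
struct CommonFrame {
  virtual ~CommonFrame() = default;
  virtual FrameType type() const = 0;
};

// Stand-in for TypedFrame: frames identified purely by a type marker.
struct TypedFrame : CommonFrame {};

// Stand-in for TypedFrameWithJSLinkage: typed frames that still expose
// JS-level information (as BuiltinFrame does in the diff).
struct TypedFrameWithJSLinkage : TypedFrame {
  virtual int ComputeParametersCount() const = 0;
};

struct ToyWasmFrame final : TypedFrame {
  FrameType type() const override { return FrameType::WASM; }
};

struct ToyBuiltinFrame final : TypedFrameWithJSLinkage {
  FrameType type() const override { return FrameType::BUILTIN; }
  int ComputeParametersCount() const override { return 0; }
};

int main() {
  ToyWasmFrame wasm;
  ToyBuiltinFrame builtin;
  const CommonFrame* frames[] = {&wasm, &builtin};
  for (const CommonFrame* f : frames)
    std::cout << static_cast<int>(f->type()) << "\n";  // prints 0 then 1
}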
diff --git a/deps/v8/src/execution/isolate-data.h b/deps/v8/src/execution/isolate-data.h
index 26acf4253c..c875d92f09 100644
--- a/deps/v8/src/execution/isolate-data.h
+++ b/deps/v8/src/execution/isolate-data.h
@@ -8,6 +8,7 @@
#include "src/builtins/builtins.h"
#include "src/codegen/constants-arch.h"
#include "src/codegen/external-reference-table.h"
+#include "src/execution/external-pointer-table.h"
#include "src/execution/stack-guard.h"
#include "src/execution/thread-local-top.h"
#include "src/roots/roots.h"
@@ -56,6 +57,10 @@ class IsolateData final {
static constexpr int builtin_entry_table_offset() {
return kBuiltinEntryTableOffset - kIsolateRootBias;
}
+ static constexpr int builtin_entry_slot_offset(Builtins::Name builtin_index) {
+ CONSTEXPR_DCHECK(Builtins::IsBuiltinId(builtin_index));
+ return builtin_entry_table_offset() + builtin_index * kSystemPointerSize;
+ }
// Root-register-relative offset of the builtins table.
static constexpr int builtins_table_offset() {
@@ -131,6 +136,7 @@ class IsolateData final {
V(kThreadLocalTopOffset, ThreadLocalTop::kSizeInBytes) \
V(kBuiltinEntryTableOffset, Builtins::builtin_count* kSystemPointerSize) \
V(kBuiltinsTableOffset, Builtins::builtin_count* kSystemPointerSize) \
+ FIELDS_HEAP_SANDBOX(V) \
V(kStackIsIterableOffset, kUInt8Size) \
/* This padding aligns IsolateData size by 8 bytes. */ \
V(kPaddingOffset, \
@@ -138,6 +144,13 @@ class IsolateData final {
/* Total size. */ \
V(kSize, 0)
+#ifdef V8_HEAP_SANDBOX
+#define FIELDS_HEAP_SANDBOX(V) \
+ V(kExternalPointerTableOffset, kSystemPointerSize * 3)
+#else
+#define FIELDS_HEAP_SANDBOX(V)
+#endif // V8_HEAP_SANDBOX
+
DEFINE_FIELD_OFFSET_CONSTANTS(0, FIELDS)
#undef FIELDS
@@ -172,6 +185,11 @@ class IsolateData final {
// The entries in this array are tagged pointers to Code objects.
Address builtins_[Builtins::builtin_count] = {};
+ // Table containing pointers to external objects.
+#ifdef V8_HEAP_SANDBOX
+ ExternalPointerTable external_pointer_table_;
+#endif
+
// Whether the SafeStackFrameIterator can successfully iterate the current
// stack. Only valid values are 0 or 1.
uint8_t stack_is_iterable_ = 1;
@@ -215,6 +233,10 @@ void IsolateData::AssertPredictableLayout() {
STATIC_ASSERT(offsetof(IsolateData, fast_c_call_caller_pc_) ==
kFastCCallCallerPCOffset);
STATIC_ASSERT(offsetof(IsolateData, stack_guard_) == kStackGuardOffset);
+#ifdef V8_HEAP_SANDBOX
+ STATIC_ASSERT(offsetof(IsolateData, external_pointer_table_) ==
+ kExternalPointerTableOffset);
+#endif
STATIC_ASSERT(offsetof(IsolateData, stack_is_iterable_) ==
kStackIsIterableOffset);
STATIC_ASSERT(sizeof(IsolateData) == IsolateData::kSize);
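The new builtin_entry_slot_offset() above is pure offset arithmetic: the root-register-relative offset of a builtin's entry slot is the entry-table offset plus the builtin index times the pointer size. A minimal sketch with made-up constants follows; the real values come from the FIELDS layout and the target word size.

// Illustrative sketch: the constants here are invented for the example.
#include <iostream>

constexpr int kSystemPointerSize = 8;            // assumed 64-bit target
constexpr int kBuiltinEntryTableOffset = 0x100;  // made-up table offset
constexpr int kBuiltinCount = 4;                 // made-up builtin count

constexpr int BuiltinEntrySlotOffset(int builtin_index) {
  // Mirrors the new accessor: table base plus index * pointer size.
  return kBuiltinEntryTableOffset + builtin_index * kSystemPointerSize;
}

static_assert(BuiltinEntrySlotOffset(0) == kBuiltinEntryTableOffset,
              "slot 0 is the table base");

int main() {
  for (int i = 0; i < kBuiltinCount; ++i) {
    std::cout << "builtin " << i << " entry slot at offset 0x" << std::hex
              << BuiltinEntrySlotOffset(i) << std::dec << "\n";
  }
}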
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index b3a84d01be..42f534c23e 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -17,10 +17,6 @@
namespace v8 {
namespace internal {
-IsolateAllocationMode Isolate::isolate_allocation_mode() {
- return isolate_allocator_->mode();
-}
-
void Isolate::set_context(Context context) {
DCHECK(context.is_null() || context.IsContext());
thread_local_top()->context_ = context;
diff --git a/deps/v8/src/execution/isolate-utils-inl.h b/deps/v8/src/execution/isolate-utils-inl.h
index 0c739eafd9..2cc66a473c 100644
--- a/deps/v8/src/execution/isolate-utils-inl.h
+++ b/deps/v8/src/execution/isolate-utils-inl.h
@@ -13,34 +13,19 @@
namespace v8 {
namespace internal {
-inline const Isolate* GetIsolateForPtrComprFromOnHeapAddress(Address address) {
+inline constexpr IsolateRoot GetIsolateForPtrComprFromOnHeapAddress(
+ Address address) {
#ifdef V8_COMPRESS_POINTERS
- return Isolate::FromRoot(GetIsolateRoot(address));
+ return IsolateRoot(GetIsolateRootAddress(address));
#else
- return nullptr;
+ return IsolateRoot();
#endif // V8_COMPRESS_POINTERS
}
-inline const Isolate* GetIsolateForPtrCompr(HeapObject object) {
+inline IsolateRoot GetIsolateForPtrCompr(HeapObject object) {
return GetIsolateForPtrComprFromOnHeapAddress(object.ptr());
}
-inline const Isolate* GetIsolateForPtrCompr(const Isolate* isolate) {
-#ifdef V8_COMPRESS_POINTERS
- return isolate;
-#else
- return nullptr;
-#endif // V8_COMPRESS_POINTERS
-}
-
-inline const Isolate* GetIsolateForPtrCompr(const LocalIsolate* isolate) {
-#ifdef V8_COMPRESS_POINTERS
- return isolate->GetIsolateForPtrCompr();
-#else
- return nullptr;
-#endif // V8_COMPRESS_POINTERS
-}
-
V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
// Avoid using the below GetIsolateFromWritableObject because we want to be
// able to get the heap, but not the isolate, for off-thread objects.
@@ -48,7 +33,8 @@ V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
#if defined V8_ENABLE_THIRD_PARTY_HEAP
return Heap::GetIsolateFromWritableObject(object)->heap();
#elif defined V8_COMPRESS_POINTERS
- Isolate* isolate = Isolate::FromRoot(GetIsolateRoot(object.ptr()));
+ Isolate* isolate =
+ Isolate::FromRootAddress(GetIsolateRootAddress(object.ptr()));
DCHECK_NOT_NULL(isolate);
return isolate->heap();
#else
@@ -62,7 +48,8 @@ V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
return Heap::GetIsolateFromWritableObject(object);
#elif defined V8_COMPRESS_POINTERS
- Isolate* isolate = Isolate::FromRoot(GetIsolateRoot(object.ptr()));
+ Isolate* isolate =
+ Isolate::FromRootAddress(GetIsolateRootAddress(object.ptr()));
DCHECK_NOT_NULL(isolate);
return isolate;
#else
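GetIsolateForPtrComprFromOnHeapAddress() now packages the isolate root address into an IsolateRoot value instead of materializing an Isolate*. The sketch below illustrates the underlying idea, under the assumption (for illustration only) that the pointer-compression cage is aligned to a fixed power-of-two boundary, so the root falls out of masking any on-heap address.

// Illustrative sketch: the 4 GB alignment is an assumption for the example,
// not a statement about the real kPtrComprIsolateRootAlignment value.
#include <cstdint>
#include <iostream>

using Address = uint64_t;

constexpr Address kAssumedRootAlignment = Address{4} * 1024 * 1024 * 1024;

Address RootAddressFromOnHeapAddress(Address on_heap_address) {
  // All objects in one cage share the same aligned base, so rounding the
  // address down to the cage alignment recovers the root.
  return on_heap_address & ~(kAssumedRootAlignment - 1);
}

int main() {
  const Address root = Address{7} * kAssumedRootAlignment;  // pretend cage base
  std::cout << std::hex
            << RootAddressFromOnHeapAddress(root + 0x1234) << "\n"     // == root
            << RootAddressFromOnHeapAddress(root + 0xBEEF00) << "\n";  // == root
}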
diff --git a/deps/v8/src/execution/isolate-utils.h b/deps/v8/src/execution/isolate-utils.h
index 3b5505f765..2204b2cd96 100644
--- a/deps/v8/src/execution/isolate-utils.h
+++ b/deps/v8/src/execution/isolate-utils.h
@@ -14,7 +14,7 @@ namespace internal {
// value is intended to be used only as a hoisted computation of isolate root
// inside trivial accessors for optimizing value decompression.
// When pointer compression is disabled this function always returns nullptr.
-V8_INLINE const Isolate* GetIsolateForPtrCompr(HeapObject object);
+V8_INLINE IsolateRoot GetIsolateForPtrCompr(HeapObject object);
V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object);
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index c1c3bd1b24..1c1380061b 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -18,6 +18,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
#include "src/base/hashmap.h"
+#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
#include "src/base/utils/random-number-generator.h"
@@ -60,6 +61,7 @@
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/backing-store.h"
#include "src/objects/elements.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
@@ -106,8 +108,8 @@
extern "C" const uint8_t* v8_Default_embedded_blob_code_;
extern "C" uint32_t v8_Default_embedded_blob_code_size_;
-extern "C" const uint8_t* v8_Default_embedded_blob_metadata_;
-extern "C" uint32_t v8_Default_embedded_blob_metadata_size_;
+extern "C" const uint8_t* v8_Default_embedded_blob_data_;
+extern "C" uint32_t v8_Default_embedded_blob_data_size_;
namespace v8 {
namespace internal {
@@ -130,18 +132,18 @@ const uint8_t* DefaultEmbeddedBlobCode() {
uint32_t DefaultEmbeddedBlobCodeSize() {
return v8_Default_embedded_blob_code_size_;
}
-const uint8_t* DefaultEmbeddedBlobMetadata() {
- return v8_Default_embedded_blob_metadata_;
+const uint8_t* DefaultEmbeddedBlobData() {
+ return v8_Default_embedded_blob_data_;
}
-uint32_t DefaultEmbeddedBlobMetadataSize() {
- return v8_Default_embedded_blob_metadata_size_;
+uint32_t DefaultEmbeddedBlobDataSize() {
+ return v8_Default_embedded_blob_data_size_;
}
#ifdef V8_MULTI_SNAPSHOTS
extern "C" const uint8_t* v8_Trusted_embedded_blob_code_;
extern "C" uint32_t v8_Trusted_embedded_blob_code_size_;
-extern "C" const uint8_t* v8_Trusted_embedded_blob_metadata_;
-extern "C" uint32_t v8_Trusted_embedded_blob_metadata_size_;
+extern "C" const uint8_t* v8_Trusted_embedded_blob_data_;
+extern "C" uint32_t v8_Trusted_embedded_blob_data_size_;
const uint8_t* TrustedEmbeddedBlobCode() {
return v8_Trusted_embedded_blob_code_;
@@ -149,11 +151,11 @@ const uint8_t* TrustedEmbeddedBlobCode() {
uint32_t TrustedEmbeddedBlobCodeSize() {
return v8_Trusted_embedded_blob_code_size_;
}
-const uint8_t* TrustedEmbeddedBlobMetadata() {
- return v8_Trusted_embedded_blob_metadata_;
+const uint8_t* TrustedEmbeddedBlobData() {
+ return v8_Trusted_embedded_blob_data_;
}
-uint32_t TrustedEmbeddedBlobMetadataSize() {
- return v8_Trusted_embedded_blob_metadata_size_;
+uint32_t TrustedEmbeddedBlobDataSize() {
+ return v8_Trusted_embedded_blob_data_size_;
}
#endif
@@ -168,8 +170,8 @@ namespace {
std::atomic<const uint8_t*> current_embedded_blob_code_(nullptr);
std::atomic<uint32_t> current_embedded_blob_code_size_(0);
-std::atomic<const uint8_t*> current_embedded_blob_metadata_(nullptr);
-std::atomic<uint32_t> current_embedded_blob_metadata_size_(0);
+std::atomic<const uint8_t*> current_embedded_blob_data_(nullptr);
+std::atomic<uint32_t> current_embedded_blob_data_size_(0);
// The various workflows around embedded snapshots are fairly complex. We need
// to support plain old snapshot builds, nosnap builds, and the requirements of
@@ -195,16 +197,16 @@ std::atomic<uint32_t> current_embedded_blob_metadata_size_(0);
// This mutex protects access to the following variables:
// - sticky_embedded_blob_code_
// - sticky_embedded_blob_code_size_
-// - sticky_embedded_blob_metadata_
-// - sticky_embedded_blob_metadata_size_
+// - sticky_embedded_blob_data_
+// - sticky_embedded_blob_data_size_
// - enable_embedded_blob_refcounting_
// - current_embedded_blob_refs_
base::LazyMutex current_embedded_blob_refcount_mutex_ = LAZY_MUTEX_INITIALIZER;
const uint8_t* sticky_embedded_blob_code_ = nullptr;
uint32_t sticky_embedded_blob_code_size_ = 0;
-const uint8_t* sticky_embedded_blob_metadata_ = nullptr;
-uint32_t sticky_embedded_blob_metadata_size_ = 0;
+const uint8_t* sticky_embedded_blob_data_ = nullptr;
+uint32_t sticky_embedded_blob_data_size_ = 0;
bool enable_embedded_blob_refcounting_ = true;
int current_embedded_blob_refs_ = 0;
@@ -213,19 +215,17 @@ const uint8_t* StickyEmbeddedBlobCode() { return sticky_embedded_blob_code_; }
uint32_t StickyEmbeddedBlobCodeSize() {
return sticky_embedded_blob_code_size_;
}
-const uint8_t* StickyEmbeddedBlobMetadata() {
- return sticky_embedded_blob_metadata_;
-}
-uint32_t StickyEmbeddedBlobMetadataSize() {
- return sticky_embedded_blob_metadata_size_;
+const uint8_t* StickyEmbeddedBlobData() { return sticky_embedded_blob_data_; }
+uint32_t StickyEmbeddedBlobDataSize() {
+ return sticky_embedded_blob_data_size_;
}
void SetStickyEmbeddedBlob(const uint8_t* code, uint32_t code_size,
- const uint8_t* metadata, uint32_t metadata_size) {
+ const uint8_t* data, uint32_t data_size) {
sticky_embedded_blob_code_ = code;
sticky_embedded_blob_code_size_ = code_size;
- sticky_embedded_blob_metadata_ = metadata;
- sticky_embedded_blob_metadata_size_ = metadata_size;
+ sticky_embedded_blob_data_ = data;
+ sticky_embedded_blob_data_size_ = data_size;
}
} // namespace
@@ -242,23 +242,22 @@ void FreeCurrentEmbeddedBlob() {
if (StickyEmbeddedBlobCode() == nullptr) return;
CHECK_EQ(StickyEmbeddedBlobCode(), Isolate::CurrentEmbeddedBlobCode());
- CHECK_EQ(StickyEmbeddedBlobMetadata(),
- Isolate::CurrentEmbeddedBlobMetadata());
+ CHECK_EQ(StickyEmbeddedBlobData(), Isolate::CurrentEmbeddedBlobData());
InstructionStream::FreeOffHeapInstructionStream(
const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobCode()),
Isolate::CurrentEmbeddedBlobCodeSize(),
- const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobMetadata()),
- Isolate::CurrentEmbeddedBlobMetadataSize());
+ const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobData()),
+ Isolate::CurrentEmbeddedBlobDataSize());
current_embedded_blob_code_.store(nullptr, std::memory_order_relaxed);
current_embedded_blob_code_size_.store(0, std::memory_order_relaxed);
- current_embedded_blob_metadata_.store(nullptr, std::memory_order_relaxed);
- current_embedded_blob_metadata_size_.store(0, std::memory_order_relaxed);
+ current_embedded_blob_data_.store(nullptr, std::memory_order_relaxed);
+ current_embedded_blob_data_size_.store(0, std::memory_order_relaxed);
sticky_embedded_blob_code_ = nullptr;
sticky_embedded_blob_code_size_ = 0;
- sticky_embedded_blob_metadata_ = nullptr;
- sticky_embedded_blob_metadata_size_ = 0;
+ sticky_embedded_blob_data_ = nullptr;
+ sticky_embedded_blob_data_size_ = 0;
}
// static
@@ -278,29 +277,37 @@ bool Isolate::CurrentEmbeddedBlobIsBinaryEmbedded() {
}
void Isolate::SetEmbeddedBlob(const uint8_t* code, uint32_t code_size,
- const uint8_t* metadata, uint32_t metadata_size) {
+ const uint8_t* data, uint32_t data_size) {
CHECK_NOT_NULL(code);
- CHECK_NOT_NULL(metadata);
+ CHECK_NOT_NULL(data);
embedded_blob_code_ = code;
embedded_blob_code_size_ = code_size;
- embedded_blob_metadata_ = metadata;
- embedded_blob_metadata_size_ = metadata_size;
+ embedded_blob_data_ = data;
+ embedded_blob_data_size_ = data_size;
current_embedded_blob_code_.store(code, std::memory_order_relaxed);
current_embedded_blob_code_size_.store(code_size, std::memory_order_relaxed);
- current_embedded_blob_metadata_.store(metadata, std::memory_order_relaxed);
- current_embedded_blob_metadata_size_.store(metadata_size,
- std::memory_order_relaxed);
+ current_embedded_blob_data_.store(data, std::memory_order_relaxed);
+ current_embedded_blob_data_size_.store(data_size, std::memory_order_relaxed);
#ifdef DEBUG
// Verify that the contents of the embedded blob are unchanged from
// serialization-time, just to ensure the compiler isn't messing with us.
EmbeddedData d = EmbeddedData::FromBlob();
- if (d.EmbeddedBlobHash() != d.CreateEmbeddedBlobHash()) {
+ if (d.EmbeddedBlobDataHash() != d.CreateEmbeddedBlobDataHash()) {
FATAL(
- "Embedded blob checksum verification failed. This indicates that the "
- "embedded blob has been modified since compilation time. A common "
- "cause is a debugging breakpoint set within builtin code.");
+ "Embedded blob data section checksum verification failed. This "
+ "indicates that the embedded blob has been modified since compilation "
+ "time.");
+ }
+ if (FLAG_text_is_readable) {
+ if (d.EmbeddedBlobCodeHash() != d.CreateEmbeddedBlobCodeHash()) {
+ FATAL(
+ "Embedded blob code section checksum verification failed. This "
+ "indicates that the embedded blob has been modified since "
+ "compilation time. A common cause is a debugging breakpoint set "
+ "within builtin code.");
+ }
}
#endif // DEBUG
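With the metadata-to-data rename, SetEmbeddedBlob() now verifies two checksums instead of one: the data section is always checked, while the code-section check is additionally gated on FLAG_text_is_readable. The standalone sketch below shows only that split with a throwaway hash; the hash function and section contents are invented for the example.

// Illustrative sketch: invented hash and sections, only the control flow
// mirrors the DEBUG verification above.
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

uint32_t Fnv1a(const std::vector<uint8_t>& bytes) {
  uint32_t hash = 2166136261u;
  for (uint8_t b : bytes) {
    hash ^= b;
    hash *= 16777619u;
  }
  return hash;
}

void VerifyBlob(const std::vector<uint8_t>& data, uint32_t expected_data_hash,
                const std::vector<uint8_t>& code, uint32_t expected_code_hash,
                bool text_is_readable) {
  if (Fnv1a(data) != expected_data_hash) {
    std::fprintf(stderr, "data section checksum mismatch\n");
    std::abort();
  }
  // Only touch the code pages when they are readable.
  if (text_is_readable && Fnv1a(code) != expected_code_hash) {
    std::fprintf(stderr, "code section checksum mismatch\n");
    std::abort();
  }
}

int main() {
  std::vector<uint8_t> data = {1, 2, 3};
  std::vector<uint8_t> code = {4, 5, 6};
  VerifyBlob(data, Fnv1a(data), code, Fnv1a(code), /*text_is_readable=*/true);
  std::puts("blob verified");
}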
@@ -313,21 +320,21 @@ void Isolate::ClearEmbeddedBlob() {
CHECK(enable_embedded_blob_refcounting_);
CHECK_EQ(embedded_blob_code_, CurrentEmbeddedBlobCode());
CHECK_EQ(embedded_blob_code_, StickyEmbeddedBlobCode());
- CHECK_EQ(embedded_blob_metadata_, CurrentEmbeddedBlobMetadata());
- CHECK_EQ(embedded_blob_metadata_, StickyEmbeddedBlobMetadata());
+ CHECK_EQ(embedded_blob_data_, CurrentEmbeddedBlobData());
+ CHECK_EQ(embedded_blob_data_, StickyEmbeddedBlobData());
embedded_blob_code_ = nullptr;
embedded_blob_code_size_ = 0;
- embedded_blob_metadata_ = nullptr;
- embedded_blob_metadata_size_ = 0;
+ embedded_blob_data_ = nullptr;
+ embedded_blob_data_size_ = 0;
current_embedded_blob_code_.store(nullptr, std::memory_order_relaxed);
current_embedded_blob_code_size_.store(0, std::memory_order_relaxed);
- current_embedded_blob_metadata_.store(nullptr, std::memory_order_relaxed);
- current_embedded_blob_metadata_size_.store(0, std::memory_order_relaxed);
+ current_embedded_blob_data_.store(nullptr, std::memory_order_relaxed);
+ current_embedded_blob_data_size_.store(0, std::memory_order_relaxed);
sticky_embedded_blob_code_ = nullptr;
sticky_embedded_blob_code_size_ = 0;
- sticky_embedded_blob_metadata_ = nullptr;
- sticky_embedded_blob_metadata_size_ = 0;
+ sticky_embedded_blob_data_ = nullptr;
+ sticky_embedded_blob_data_size_ = 0;
}
const uint8_t* Isolate::embedded_blob_code() const {
@@ -336,11 +343,11 @@ const uint8_t* Isolate::embedded_blob_code() const {
uint32_t Isolate::embedded_blob_code_size() const {
return embedded_blob_code_size_;
}
-const uint8_t* Isolate::embedded_blob_metadata() const {
- return embedded_blob_metadata_;
+const uint8_t* Isolate::embedded_blob_data() const {
+ return embedded_blob_data_;
}
-uint32_t Isolate::embedded_blob_metadata_size() const {
- return embedded_blob_metadata_size_;
+uint32_t Isolate::embedded_blob_data_size() const {
+ return embedded_blob_data_size_;
}
// static
@@ -356,14 +363,14 @@ uint32_t Isolate::CurrentEmbeddedBlobCodeSize() {
}
// static
-const uint8_t* Isolate::CurrentEmbeddedBlobMetadata() {
- return current_embedded_blob_metadata_.load(
+const uint8_t* Isolate::CurrentEmbeddedBlobData() {
+ return current_embedded_blob_data_.load(
std::memory_order::memory_order_relaxed);
}
// static
-uint32_t Isolate::CurrentEmbeddedBlobMetadataSize() {
- return current_embedded_blob_metadata_size_.load(
+uint32_t Isolate::CurrentEmbeddedBlobDataSize() {
+ return current_embedded_blob_data_size_.load(
std::memory_order::memory_order_relaxed);
}
@@ -385,13 +392,14 @@ size_t Isolate::HashIsolateForEmbeddedBlob() {
reinterpret_cast<uint8_t*>(code.ptr() - kHeapObjectTag);
// These static asserts ensure we don't miss relevant fields. We don't hash
- // instruction size and flags since they change when creating the off-heap
- // trampolines. Other data fields must remain the same.
+ // instruction/metadata size and flags since they change when creating the
+ // off-heap trampolines. Other data fields must remain the same.
STATIC_ASSERT(Code::kInstructionSizeOffset == Code::kDataStart);
- STATIC_ASSERT(Code::kFlagsOffset == Code::kInstructionSizeOffsetEnd + 1);
- STATIC_ASSERT(Code::kSafepointTableOffsetOffset ==
- Code::kFlagsOffsetEnd + 1);
- static constexpr int kStartOffset = Code::kSafepointTableOffsetOffset;
+ STATIC_ASSERT(Code::kMetadataSizeOffset ==
+ Code::kInstructionSizeOffsetEnd + 1);
+ STATIC_ASSERT(Code::kFlagsOffset == Code::kMetadataSizeOffsetEnd + 1);
+ STATIC_ASSERT(Code::kBuiltinIndexOffset == Code::kFlagsOffsetEnd + 1);
+ static constexpr int kStartOffset = Code::kBuiltinIndexOffset;
for (int j = kStartOffset; j < Code::kUnalignedHeaderSize; j++) {
hash = base::hash_combine(hash, size_t{code_ptr[j]});
@@ -1020,6 +1028,9 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
CaptureStackTraceOptions options) {
DisallowJavascriptExecution no_js(isolate);
+ TRACE_EVENT_BEGIN1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
+ "CaptureStackTrace", "maxFrameCount", options.limit);
+
wasm::WasmCodeRefScope code_ref_scope;
FrameArrayBuilder builder(isolate, options.skip_mode, options.limit, caller,
options.filter_mode);
@@ -1040,7 +1051,7 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
// A standard frame may include many summarized frames (due to
// inlining).
std::vector<FrameSummary> frames;
- StandardFrame::cast(frame)->Summarize(&frames);
+ CommonFrame::cast(frame)->Summarize(&frames);
for (size_t i = frames.size(); i-- != 0 && !builder.full();) {
auto& summary = frames[i];
if (options.capture_only_frames_subject_to_debugging &&
@@ -1141,7 +1152,10 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
}
}
- return builder.GetElementsAsStackTraceFrameArray();
+ Handle<FixedArray> stack_trace = builder.GetElementsAsStackTraceFrameArray();
+ TRACE_EVENT_END1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
+ "CaptureStackTrace", "frameCount", stack_trace->length());
+ return stack_trace;
}
} // namespace
@@ -1411,7 +1425,7 @@ Object Isolate::StackOverflow() {
ErrorUtils::Construct(this, fun, fun, msg, SKIP_NONE, no_caller,
ErrorUtils::StackTraceCollection::kSimple));
- Throw(*exception, nullptr);
+ Throw(*exception);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && FLAG_stress_compaction) {
@@ -1423,7 +1437,7 @@ Object Isolate::StackOverflow() {
return ReadOnlyRoots(heap()).exception();
}
-void Isolate::ThrowAt(Handle<JSObject> exception, MessageLocation* location) {
+Object Isolate::ThrowAt(Handle<JSObject> exception, MessageLocation* location) {
Handle<Name> key_start_pos = factory()->error_start_pos_symbol();
Object::SetProperty(this, exception, key_start_pos,
handle(Smi::FromInt(location->start_pos()), this),
@@ -1444,11 +1458,11 @@ void Isolate::ThrowAt(Handle<JSObject> exception, MessageLocation* location) {
Just(ShouldThrow::kThrowOnError))
.Check();
- Throw(*exception, location);
+ return ThrowInternal(*exception, location);
}
Object Isolate::TerminateExecution() {
- return Throw(ReadOnlyRoots(this).termination_exception(), nullptr);
+ return Throw(ReadOnlyRoots(this).termination_exception());
}
void Isolate::CancelTerminateExecution() {
@@ -1578,7 +1592,7 @@ Handle<JSMessageObject> Isolate::CreateMessageOrAbort(
return message_obj;
}
-Object Isolate::Throw(Object raw_exception, MessageLocation* location) {
+Object Isolate::ThrowInternal(Object raw_exception, MessageLocation* location) {
DCHECK(!has_pending_exception());
HandleScope scope(this);
@@ -1803,7 +1817,7 @@ Object Isolate::UnwindAndFindHandler() {
code.stack_slots() * kSystemPointerSize;
// TODO(bmeurer): Turbofanned BUILTIN frames appear as OPTIMIZED,
- // but do not have a code kind of OPTIMIZED_FUNCTION.
+ // but do not have a code kind of TURBOFAN.
if (CodeKindCanDeoptimize(code.kind()) &&
code.marked_for_deoptimization()) {
// If the target code is lazy deoptimized, we jump to the original
@@ -1880,9 +1894,8 @@ Object Isolate::UnwindAndFindHandler() {
case StackFrame::BUILTIN:
// For builtin frames we are guaranteed not to find a handler.
if (catchable_by_js) {
- CHECK_EQ(-1,
- JavaScriptFrame::cast(frame)->LookupExceptionHandlerInTable(
- nullptr, nullptr));
+ CHECK_EQ(-1, BuiltinFrame::cast(frame)->LookupExceptionHandlerInTable(
+ nullptr, nullptr));
}
break;
@@ -2114,7 +2127,7 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
bool Isolate::ComputeLocation(MessageLocation* target) {
StackTraceFrameIterator it(this);
if (it.done()) return false;
- StandardFrame* frame = it.frame();
+ CommonFrame* frame = it.frame();
// Compute the location from the function and the relocation info of the
// baseline code. For optimized code this will use the deoptimization
// information to get canonical location information.
@@ -2656,6 +2669,15 @@ void Isolate::ReleaseSharedPtrs() {
}
}
+bool Isolate::IsBuiltinsTableHandleLocation(Address* handle_location) {
+ FullObjectSlot location(handle_location);
+ FullObjectSlot first_root(builtins_table());
+ FullObjectSlot last_root(builtins_table() + Builtins::builtin_count);
+ if (location >= last_root) return false;
+ if (location < first_root) return false;
+ return true;
+}
+
void Isolate::RegisterManagedPtrDestructor(ManagedPtrDestructor* destructor) {
base::MutexGuard lock(&managed_ptr_destructors_mutex_);
DCHECK_NULL(destructor->prev_);
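The new IsBuiltinsTableHandleLocation() above is a half-open range check: a handle location belongs to the builtins table if it lies in [first slot, first slot + builtin_count). A tiny standalone analogue over a plain array (the helper name and slot count are invented):

// Illustrative sketch: a half-open [begin, end) membership test like the
// builtins-table check above, written over a plain array.
#include <cstdint>
#include <iostream>

constexpr int kSlotCount = 8;

bool IsTableSlot(const long* table, const long* location) {
  auto first = reinterpret_cast<uintptr_t>(table);
  auto last = reinterpret_cast<uintptr_t>(table + kSlotCount);  // one past the end
  auto loc = reinterpret_cast<uintptr_t>(location);
  if (loc >= last) return false;
  if (loc < first) return false;
  return true;
}

int main() {
  long table[kSlotCount] = {};
  long elsewhere = 0;
  std::cout << IsTableSlot(table, &table[3]) << "\n";           // 1
  std::cout << IsTableSlot(table, table + kSlotCount) << "\n";  // 0 (past the end)
  std::cout << IsTableSlot(table, &elsewhere) << "\n";          // 0
}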
@@ -2857,18 +2879,16 @@ std::atomic<size_t> Isolate::non_disposed_isolates_;
#endif // DEBUG
// static
-Isolate* Isolate::New(IsolateAllocationMode mode) {
+Isolate* Isolate::New() {
// IsolateAllocator allocates the memory for the Isolate object according to
// the given allocation mode.
std::unique_ptr<IsolateAllocator> isolate_allocator =
- std::make_unique<IsolateAllocator>(mode);
+ std::make_unique<IsolateAllocator>();
// Construct Isolate object in the allocated memory.
void* isolate_ptr = isolate_allocator->isolate_memory();
Isolate* isolate = new (isolate_ptr) Isolate(std::move(isolate_allocator));
-#if V8_TARGET_ARCH_64_BIT
- DCHECK_IMPLIES(
- mode == IsolateAllocationMode::kInV8Heap,
- IsAligned(isolate->isolate_root(), kPtrComprIsolateRootAlignment));
+#ifdef V8_COMPRESS_POINTERS
+ DCHECK(IsAligned(isolate->isolate_root(), kPtrComprIsolateRootAlignment));
#endif
#ifdef DEBUG
@@ -2933,6 +2953,9 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
allocator_(new TracingAccountingAllocator(this)),
builtins_(this),
+#if defined(DEBUG) || defined(VERIFY_HEAP)
+ num_active_deserializers_(0),
+#endif
rail_mode_(PERFORMANCE_ANIMATION),
code_event_dispatcher_(new CodeEventDispatcher()),
persistent_handles_list_(new PersistentHandlesList()),
@@ -2982,6 +3005,15 @@ void Isolate::CheckIsolateLayout() {
Internals::kIsolateStackGuardOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.roots_)),
Internals::kIsolateRootsOffset);
+
+#ifdef V8_HEAP_SANDBOX
+ CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, buffer_)),
+ Internals::kExternalPointerTableBufferOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, length_)),
+ Internals::kExternalPointerTableLengthOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, capacity_)),
+ Internals::kExternalPointerTableCapacityOffset);
+#endif
}
void Isolate::ClearSerializerData() {
@@ -3057,8 +3089,6 @@ void Isolate::Deinit() {
ReleaseSharedPtrs();
- delete deoptimizer_data_;
- deoptimizer_data_ = nullptr;
string_table_.reset();
builtins_.TearDown();
bootstrapper_->TearDown();
@@ -3267,17 +3297,16 @@ namespace {
void CreateOffHeapTrampolines(Isolate* isolate) {
DCHECK_NOT_NULL(isolate->embedded_blob_code());
DCHECK_NE(0, isolate->embedded_blob_code_size());
- DCHECK_NOT_NULL(isolate->embedded_blob_metadata());
- DCHECK_NE(0, isolate->embedded_blob_metadata_size());
+ DCHECK_NOT_NULL(isolate->embedded_blob_data());
+ DCHECK_NE(0, isolate->embedded_blob_data_size());
HandleScope scope(isolate);
Builtins* builtins = isolate->builtins();
EmbeddedData d = EmbeddedData::FromBlob();
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (int i = 0; i < Builtins::builtin_count; i++) {
- if (!Builtins::IsIsolateIndependent(i)) continue;
-
Address instruction_start = d.InstructionStartOfBuiltin(i);
Handle<Code> trampoline = isolate->factory()->NewOffHeapTrampolineFor(
builtins->builtin_handle(i), instruction_start);
@@ -3300,15 +3329,15 @@ bool IsolateIsCompatibleWithEmbeddedBlob(Isolate* isolate) {
void Isolate::InitializeDefaultEmbeddedBlob() {
const uint8_t* code = DefaultEmbeddedBlobCode();
uint32_t code_size = DefaultEmbeddedBlobCodeSize();
- const uint8_t* metadata = DefaultEmbeddedBlobMetadata();
- uint32_t metadata_size = DefaultEmbeddedBlobMetadataSize();
+ const uint8_t* data = DefaultEmbeddedBlobData();
+ uint32_t data_size = DefaultEmbeddedBlobDataSize();
#ifdef V8_MULTI_SNAPSHOTS
if (!FLAG_untrusted_code_mitigations) {
code = TrustedEmbeddedBlobCode();
code_size = TrustedEmbeddedBlobCodeSize();
- metadata = TrustedEmbeddedBlobMetadata();
- metadata_size = TrustedEmbeddedBlobMetadataSize();
+ data = TrustedEmbeddedBlobData();
+ data_size = TrustedEmbeddedBlobDataSize();
}
#endif
@@ -3318,8 +3347,8 @@ void Isolate::InitializeDefaultEmbeddedBlob() {
if (StickyEmbeddedBlobCode() != nullptr) {
code = StickyEmbeddedBlobCode();
code_size = StickyEmbeddedBlobCodeSize();
- metadata = StickyEmbeddedBlobMetadata();
- metadata_size = StickyEmbeddedBlobMetadataSize();
+ data = StickyEmbeddedBlobData();
+ data_size = StickyEmbeddedBlobDataSize();
current_embedded_blob_refs_++;
}
}
@@ -3327,7 +3356,7 @@ void Isolate::InitializeDefaultEmbeddedBlob() {
if (code == nullptr) {
CHECK_EQ(0, code_size);
} else {
- SetEmbeddedBlob(code, code_size, metadata, metadata_size);
+ SetEmbeddedBlob(code, code_size, data, data_size);
}
}
@@ -3341,25 +3370,25 @@ void Isolate::CreateAndSetEmbeddedBlob() {
// If a sticky blob has been set, we reuse it.
if (StickyEmbeddedBlobCode() != nullptr) {
CHECK_EQ(embedded_blob_code(), StickyEmbeddedBlobCode());
- CHECK_EQ(embedded_blob_metadata(), StickyEmbeddedBlobMetadata());
+ CHECK_EQ(embedded_blob_data(), StickyEmbeddedBlobData());
CHECK_EQ(CurrentEmbeddedBlobCode(), StickyEmbeddedBlobCode());
- CHECK_EQ(CurrentEmbeddedBlobMetadata(), StickyEmbeddedBlobMetadata());
+ CHECK_EQ(CurrentEmbeddedBlobData(), StickyEmbeddedBlobData());
} else {
// Create and set a new embedded blob.
uint8_t* code;
uint32_t code_size;
- uint8_t* metadata;
- uint32_t metadata_size;
- InstructionStream::CreateOffHeapInstructionStream(
- this, &code, &code_size, &metadata, &metadata_size);
+ uint8_t* data;
+ uint32_t data_size;
+ InstructionStream::CreateOffHeapInstructionStream(this, &code, &code_size,
+ &data, &data_size);
CHECK_EQ(0, current_embedded_blob_refs_);
const uint8_t* const_code = const_cast<const uint8_t*>(code);
- const uint8_t* const_metadata = const_cast<const uint8_t*>(metadata);
- SetEmbeddedBlob(const_code, code_size, const_metadata, metadata_size);
+ const uint8_t* const_data = const_cast<const uint8_t*>(data);
+ SetEmbeddedBlob(const_code, code_size, const_data, data_size);
current_embedded_blob_refs_++;
- SetStickyEmbeddedBlob(code, code_size, metadata, metadata_size);
+ SetStickyEmbeddedBlob(code, code_size, data, data_size);
}
CreateOffHeapTrampolines(this);
@@ -3370,9 +3399,9 @@ void Isolate::TearDownEmbeddedBlob() {
if (StickyEmbeddedBlobCode() == nullptr) return;
CHECK_EQ(embedded_blob_code(), StickyEmbeddedBlobCode());
- CHECK_EQ(embedded_blob_metadata(), StickyEmbeddedBlobMetadata());
+ CHECK_EQ(embedded_blob_data(), StickyEmbeddedBlobData());
CHECK_EQ(CurrentEmbeddedBlobCode(), StickyEmbeddedBlobCode());
- CHECK_EQ(CurrentEmbeddedBlobMetadata(), StickyEmbeddedBlobMetadata());
+ CHECK_EQ(CurrentEmbeddedBlobData(), StickyEmbeddedBlobData());
base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());
current_embedded_blob_refs_--;
@@ -3380,19 +3409,19 @@ void Isolate::TearDownEmbeddedBlob() {
// We own the embedded blob and are the last holder. Free it.
InstructionStream::FreeOffHeapInstructionStream(
const_cast<uint8_t*>(embedded_blob_code()), embedded_blob_code_size(),
- const_cast<uint8_t*>(embedded_blob_metadata()),
- embedded_blob_metadata_size());
+ const_cast<uint8_t*>(embedded_blob_data()), embedded_blob_data_size());
ClearEmbeddedBlob();
}
}
-bool Isolate::InitWithoutSnapshot() { return Init(nullptr, nullptr); }
+bool Isolate::InitWithoutSnapshot() { return Init(nullptr, nullptr, false); }
-bool Isolate::InitWithSnapshot(ReadOnlyDeserializer* read_only_deserializer,
- StartupDeserializer* startup_deserializer) {
- DCHECK_NOT_NULL(read_only_deserializer);
- DCHECK_NOT_NULL(startup_deserializer);
- return Init(read_only_deserializer, startup_deserializer);
+bool Isolate::InitWithSnapshot(SnapshotData* startup_snapshot_data,
+ SnapshotData* read_only_snapshot_data,
+ bool can_rehash) {
+ DCHECK_NOT_NULL(startup_snapshot_data);
+ DCHECK_NOT_NULL(read_only_snapshot_data);
+ return Init(startup_snapshot_data, read_only_snapshot_data, can_rehash);
}
static std::string AddressToString(uintptr_t address) {
@@ -3441,12 +3470,12 @@ using MapOfLoadsAndStoresPerFunction =
MapOfLoadsAndStoresPerFunction* stack_access_count_map = nullptr;
} // namespace
-bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
- StartupDeserializer* startup_deserializer) {
+bool Isolate::Init(SnapshotData* startup_snapshot_data,
+ SnapshotData* read_only_snapshot_data, bool can_rehash) {
TRACE_ISOLATE(init);
- const bool create_heap_objects = (read_only_deserializer == nullptr);
+ const bool create_heap_objects = (read_only_snapshot_data == nullptr);
// We either have both or neither.
- DCHECK_EQ(create_heap_objects, startup_deserializer == nullptr);
+ DCHECK_EQ(create_heap_objects, startup_snapshot_data == nullptr);
base::ElapsedTimer timer;
if (create_heap_objects && FLAG_profile_deserialization) timer.Start();
@@ -3507,7 +3536,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
// SetUp the object heap.
DCHECK(!heap_.HasBeenSetUp());
heap_.SetUp();
- ReadOnlyHeap::SetUp(this, read_only_deserializer);
+ ReadOnlyHeap::SetUp(this, read_only_snapshot_data, can_rehash);
heap_.SetUpSpaces();
isolate_data_.external_reference_table()->Init(this);
@@ -3518,8 +3547,6 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
}
DCHECK_NOT_NULL(wasm_engine_);
- deoptimizer_data_ = new DeoptimizerData(heap());
-
if (setup_delegate_ == nullptr) {
setup_delegate_ = new SetupIsolateDelegate(create_heap_objects);
}
@@ -3598,7 +3625,9 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
heap_.read_only_space()->ClearStringPaddingIfNeeded();
read_only_heap_->OnCreateHeapObjectsComplete(this);
} else {
- startup_deserializer->DeserializeInto(this);
+ StartupDeserializer startup_deserializer(this, startup_snapshot_data,
+ can_rehash);
+ startup_deserializer.DeserializeIntoIsolate();
}
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
@@ -4572,7 +4601,7 @@ SaveContext::~SaveContext() {
isolate_->set_context(context_.is_null() ? Context() : *context_);
}
-bool SaveContext::IsBelowFrame(StandardFrame* frame) {
+bool SaveContext::IsBelowFrame(CommonFrame* frame) {
return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
}
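The Init() rework earlier in this file takes raw SnapshotData pointers and derives create_heap_objects from their absence, asserting that the startup and read-only snapshots are either both present or both absent. A minimal sketch of that both-or-neither contract follows; SnapshotData stands in as an opaque struct and the function body is an invention for illustration.

// Illustrative sketch: SnapshotData is an opaque stand-in; only the
// both-or-neither validation mirrors Isolate::Init().
#include <cassert>
#include <iostream>

struct SnapshotData {};  // opaque stand-in

bool InitSketch(const SnapshotData* startup, const SnapshotData* read_only) {
  const bool create_heap_objects = (read_only == nullptr);
  // We either have both snapshots or neither.
  assert(create_heap_objects == (startup == nullptr));
  if (create_heap_objects) {
    std::cout << "building heap objects from scratch\n";
  } else {
    std::cout << "deserializing from snapshots\n";
  }
  return true;
}

int main() {
  SnapshotData startup, read_only;
  InitSketch(nullptr, nullptr);      // no-snapshot path
  InitSketch(&startup, &read_only);  // snapshot path
}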
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 43b7e27dd4..18fb4b6417 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -23,6 +23,7 @@
#include "src/common/globals.h"
#include "src/debug/interface-types.h"
#include "src/execution/execution.h"
+#include "src/execution/external-pointer-table.h"
#include "src/execution/futex-emulation.h"
#include "src/execution/isolate-data.h"
#include "src/execution/messages.h"
@@ -70,12 +71,13 @@ class BuiltinsConstantsTableBuilder;
class CancelableTaskManager;
class CodeEventDispatcher;
class CodeTracer;
+class CommonFrame;
class CompilationCache;
class CompilationStatistics;
class CompilerDispatcher;
class Counters;
class Debug;
-class DeoptimizerData;
+class Deoptimizer;
class DescriptorLookupCache;
class EmbeddedFileWriterInterface;
class EternalHandles;
@@ -83,6 +85,7 @@ class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
class InnerPointerToCodeCache;
+class LocalIsolate;
class Logger;
class MaterializedObjectStore;
class Microtask;
@@ -91,14 +94,12 @@ class OptimizingCompileDispatcher;
class PersistentHandles;
class PersistentHandlesList;
class ReadOnlyArtifacts;
-class ReadOnlyDeserializer;
class RegExpStack;
class RootVisitor;
class RuntimeProfiler;
class SetupIsolateDelegate;
class Simulator;
-class StandardFrame;
-class StartupDeserializer;
+class SnapshotData;
class StringTable;
class StubCache;
class ThreadManager;
@@ -371,6 +372,18 @@ class Recorder;
} \
} while (false)
+#define WHILE_WITH_HANDLE_SCOPE(isolate, limit_check, body) \
+ do { \
+ Isolate* for_with_handle_isolate = isolate; \
+ while (limit_check) { \
+ HandleScope loop_scope(for_with_handle_isolate); \
+ for (int for_with_handle_it = 0; \
+ limit_check && for_with_handle_it < 1024; ++for_with_handle_it) { \
+ body \
+ } \
+ } \
+ } while (false)
+
#define FIELD_ACCESSOR(type, name) \
inline void set_##name(type v) { name##_ = v; } \
inline type name() const { return name##_; }
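WHILE_WITH_HANDLE_SCOPE above batches handle-scope creation: the outer while keeps the loop semantics, while the inner for re-runs the body up to 1024 times inside one HandleScope, so handles are released in chunks instead of accumulating for the whole loop or paying for a scope per iteration. A standalone analogue with a toy scope follows; the 1024 batch size is copied from the macro, everything else is invented.

// Illustrative sketch: ToyScope is invented; only the batching structure
// mirrors the macro above.
#include <iostream>

struct ToyScope {
  ToyScope() { ++live_scopes; }
  ~ToyScope() { --live_scopes; }
  static int live_scopes;
};
int ToyScope::live_scopes = 0;

int main() {
  int remaining = 5000;
  int scopes_created = 0;
  while (remaining > 0) {
    ToyScope loop_scope;  // one scope per batch, not per iteration
    ++scopes_created;
    for (int i = 0; remaining > 0 && i < 1024; ++i) {
      --remaining;  // stand-in for the loop body
    }
  }
  // 5000 iterations, but only 5 scopes (1024-iteration batches).
  std::cout << scopes_created << " scopes for 5000 iterations\n";
  std::cout << "live scopes now: " << ToyScope::live_scopes << "\n";  // 0
}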
@@ -410,6 +423,8 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
V(ModifyCodeGenerationFromStringsCallback, modify_code_gen_callback, \
nullptr) \
+ V(ModifyCodeGenerationFromStringsCallback2, modify_code_gen_callback2, \
+ nullptr) \
V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr) \
V(ExtensionCallback, wasm_module_callback, &NoExtension) \
V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
@@ -524,8 +539,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Creates Isolate object. Must be used instead of constructing Isolate with
// new operator.
- static Isolate* New(
- IsolateAllocationMode mode = IsolateAllocationMode::kDefault);
+ static Isolate* New();
// Deletes Isolate object. Must be used instead of delete operator.
// Destroys the non-default isolates.
@@ -537,9 +551,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
ReadOnlyHeap* ro_heap);
void set_read_only_heap(ReadOnlyHeap* ro_heap) { read_only_heap_ = ro_heap; }
- // Returns allocation mode of this isolate.
- V8_INLINE IsolateAllocationMode isolate_allocation_mode();
-
// Page allocator that must be used for allocating V8 heap pages.
v8::PageAllocator* page_allocator();
@@ -573,8 +584,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool InitializeCounters(); // Returns false if already initialized.
bool InitWithoutSnapshot();
- bool InitWithSnapshot(ReadOnlyDeserializer* read_only_deserializer,
- StartupDeserializer* startup_deserializer);
+ bool InitWithSnapshot(SnapshotData* startup_snapshot_data,
+ SnapshotData* read_only_snapshot_data, bool can_rehash);
// True if at least one thread Enter'ed this isolate.
bool IsInUse() { return entry_stack_ != nullptr; }
@@ -615,6 +626,14 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Mutex for serializing access to break control structures.
base::RecursiveMutex* break_access() { return &break_access_; }
+ // Shared mutex for allowing concurrent read/writes to FeedbackVectors.
+ base::SharedMutex* feedback_vector_access() {
+ return &feedback_vector_access_;
+ }
+
+ // Shared mutex for allowing concurrent read/writes to Strings.
+ base::SharedMutex* string_access() { return &string_access_; }
+
// Shared mutex for allowing concurrent read/writes to TransitionArrays.
base::SharedMutex* transition_array_access() {
return &transition_array_access_;
@@ -697,6 +716,27 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return &thread_local_top()->c_function_;
}
+#if defined(DEBUG) || defined(VERIFY_HEAP)
+ // Count the number of active deserializers, so that the heap verifier knows
+ // whether there is currently an active deserialization happening.
+ //
+ // This is needed as the verifier currently doesn't support verifying objects
+ // which are partially deserialized.
+ //
+ // TODO(leszeks): Make the verifier a bit more deserialization compatible.
+ void RegisterDeserializerStarted() { ++num_active_deserializers_; }
+ void RegisterDeserializerFinished() {
+ CHECK_GE(--num_active_deserializers_, 0);
+ }
+ bool has_active_deserializer() const {
+ return num_active_deserializers_.load(std::memory_order_acquire) > 0;
+ }
+#else
+ void RegisterDeserializerStarted() {}
+ void RegisterDeserializerFinished() {}
+ bool has_active_deserializer() const { UNREACHABLE(); }
+#endif
+
// Bottom JS entry.
Address js_entry_sp() { return thread_local_top()->js_entry_sp_; }
inline Address* js_entry_sp_address() {
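The deserializer bookkeeping added above is an atomic counter bumped when a deserializer starts and dropped when it finishes, so the heap verifier can ask has_active_deserializer() and avoid verifying objects that are only partially set up. A standalone sketch wrapping that pattern in an RAII guard follows; the guard type is an invention for the example, the diff calls the register functions directly.

// Illustrative sketch: the RAII guard is invented; only the counter
// protocol mirrors the Register*Deserializer* functions above.
#include <atomic>
#include <cassert>
#include <iostream>

std::atomic<int> num_active_deserializers{0};

bool HasActiveDeserializer() {
  return num_active_deserializers.load(std::memory_order_acquire) > 0;
}

struct ActiveDeserializerScope {
  ActiveDeserializerScope() { ++num_active_deserializers; }
  ~ActiveDeserializerScope() {
    int remaining = --num_active_deserializers;
    assert(remaining >= 0);
    (void)remaining;
  }
};

int main() {
  std::cout << HasActiveDeserializer() << "\n";  // 0
  {
    ActiveDeserializerScope scope;                 // deserialization in progress
    std::cout << HasActiveDeserializer() << "\n";  // 1
  }
  std::cout << HasActiveDeserializer() << "\n";  // 0
}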
@@ -789,17 +829,22 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Exception throwing support. The caller should use the result
// of Throw() as its return value.
- Object Throw(Object exception, MessageLocation* location = nullptr);
+ Object Throw(Object exception) { return ThrowInternal(exception, nullptr); }
+ Object ThrowAt(Handle<JSObject> exception, MessageLocation* location);
Object ThrowIllegalOperation();
template <typename T>
- V8_WARN_UNUSED_RESULT MaybeHandle<T> Throw(
- Handle<Object> exception, MessageLocation* location = nullptr) {
- Throw(*exception, location);
+ V8_WARN_UNUSED_RESULT MaybeHandle<T> Throw(Handle<Object> exception) {
+ Throw(*exception);
return MaybeHandle<T>();
}
- void ThrowAt(Handle<JSObject> exception, MessageLocation* location);
+ template <typename T>
+ V8_WARN_UNUSED_RESULT MaybeHandle<T> ThrowAt(Handle<JSObject> exception,
+ MessageLocation* location) {
+ ThrowAt(exception, location);
+ return MaybeHandle<T>();
+ }
void FatalProcessOutOfHeapMemory(const char* location) {
heap()->FatalProcessOutOfMemory(location);
@@ -958,7 +1003,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
static size_t isolate_root_bias() {
return OFFSET_OF(Isolate, isolate_data_) + IsolateData::kIsolateRootBias;
}
- static Isolate* FromRoot(Address isolate_root) {
+ static Isolate* FromRootAddress(Address isolate_root) {
return reinterpret_cast<Isolate*>(isolate_root - isolate_root_bias());
}
@@ -991,9 +1036,21 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
Address* builtin_entry_table() { return isolate_data_.builtin_entry_table(); }
V8_INLINE Address* builtins_table() { return isolate_data_.builtins(); }
+ bool IsBuiltinsTableHandleLocation(Address* handle_location);
+
StubCache* load_stub_cache() { return load_stub_cache_; }
StubCache* store_stub_cache() { return store_stub_cache_; }
- DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
+ Deoptimizer* GetAndClearCurrentDeoptimizer() {
+ Deoptimizer* result = current_deoptimizer_;
+ CHECK_NOT_NULL(result);
+ current_deoptimizer_ = nullptr;
+ return result;
+ }
+ void set_current_deoptimizer(Deoptimizer* deoptimizer) {
+ DCHECK_NULL(current_deoptimizer_);
+ DCHECK_NOT_NULL(deoptimizer);
+ current_deoptimizer_ = deoptimizer;
+ }
bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
void set_deoptimizer_lazy_throw(bool value) {
deoptimizer_lazy_throw_ = value;
@@ -1390,16 +1447,16 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
static const uint8_t* CurrentEmbeddedBlobCode();
static uint32_t CurrentEmbeddedBlobCodeSize();
- static const uint8_t* CurrentEmbeddedBlobMetadata();
- static uint32_t CurrentEmbeddedBlobMetadataSize();
+ static const uint8_t* CurrentEmbeddedBlobData();
+ static uint32_t CurrentEmbeddedBlobDataSize();
static bool CurrentEmbeddedBlobIsBinaryEmbedded();
// These always return the same result as static methods above, but don't
// access the global atomic variable (and thus *might be* slightly faster).
const uint8_t* embedded_blob_code() const;
uint32_t embedded_blob_code_size() const;
- const uint8_t* embedded_blob_metadata() const;
- uint32_t embedded_blob_metadata_size() const;
+ const uint8_t* embedded_blob_data() const;
+ uint32_t embedded_blob_data_size() const;
void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
array_buffer_allocator_ = allocator;
@@ -1560,12 +1617,26 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
MaybeLocal<v8::Context> GetContextFromRecorderContextId(
v8::metrics::Recorder::ContextId id);
+#ifdef V8_HEAP_SANDBOX
+ ExternalPointerTable& external_pointer_table() {
+ return isolate_data_.external_pointer_table_;
+ }
+
+ const ExternalPointerTable& external_pointer_table() const {
+ return isolate_data_.external_pointer_table_;
+ }
+
+ Address external_pointer_table_address() {
+ return reinterpret_cast<Address>(&isolate_data_.external_pointer_table_);
+ }
+#endif
+
private:
explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
~Isolate();
- bool Init(ReadOnlyDeserializer* read_only_deserializer,
- StartupDeserializer* startup_deserializer);
+ bool Init(SnapshotData* startup_snapshot_data,
+ SnapshotData* read_only_snapshot_data, bool can_rehash);
void CheckIsolateLayout();
@@ -1660,6 +1731,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void AddCrashKeysForIsolateAndHeapPointers();
+ // Returns the Exception sentinel.
+ Object ThrowInternal(Object exception, MessageLocation* location);
+
// This class contains a collection of data accessible from both C++ runtime
// and compiled code (including assembly stubs, builtins, interpreter bytecode
// handlers and optimized code).
@@ -1681,11 +1755,13 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
CompilationCache* compilation_cache_ = nullptr;
std::shared_ptr<Counters> async_counters_;
base::RecursiveMutex break_access_;
+ base::SharedMutex feedback_vector_access_;
+ base::SharedMutex string_access_;
base::SharedMutex transition_array_access_;
Logger* logger_ = nullptr;
StubCache* load_stub_cache_ = nullptr;
StubCache* store_stub_cache_ = nullptr;
- DeoptimizerData* deoptimizer_data_ = nullptr;
+ Deoptimizer* current_deoptimizer_ = nullptr;
bool deoptimizer_lazy_throw_ = false;
MaterializedObjectStore* materialized_object_store_ = nullptr;
bool capture_stack_trace_for_uncaught_exceptions_ = false;
@@ -1704,6 +1780,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
RuntimeState runtime_state_;
Builtins builtins_;
SetupIsolateDelegate* setup_delegate_ = nullptr;
+#if defined(DEBUG) || defined(VERIFY_HEAP)
+ std::atomic<int> num_active_deserializers_;
+#endif
#ifndef V8_INTL_SUPPORT
unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
@@ -1853,13 +1932,13 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void TearDownEmbeddedBlob();
void SetEmbeddedBlob(const uint8_t* code, uint32_t code_size,
- const uint8_t* metadata, uint32_t metadata_size);
+ const uint8_t* data, uint32_t data_size);
void ClearEmbeddedBlob();
const uint8_t* embedded_blob_code_ = nullptr;
uint32_t embedded_blob_code_size_ = 0;
- const uint8_t* embedded_blob_metadata_ = nullptr;
- uint32_t embedded_blob_metadata_size_ = 0;
+ const uint8_t* embedded_blob_data_ = nullptr;
+ uint32_t embedded_blob_data_size_ = 0;
v8::ArrayBuffer::Allocator* array_buffer_allocator_ = nullptr;
std::shared_ptr<v8::ArrayBuffer::Allocator> array_buffer_allocator_shared_;
@@ -1952,7 +2031,7 @@ class V8_EXPORT_PRIVATE SaveContext {
Handle<Context> context() { return context_; }
// Returns true if this save context is below a given JavaScript frame.
- bool IsBelowFrame(StandardFrame* frame);
+ bool IsBelowFrame(CommonFrame* frame);
private:
Isolate* const isolate_;
@@ -2018,6 +2097,7 @@ class StackLimitCheck {
StackGuard* stack_guard = isolate_->stack_guard();
return GetCurrentStackPosition() < stack_guard->real_climit();
}
+ static bool HasOverflowed(LocalIsolate* local_isolate);
// Use this to check for interrupt request in C++ code.
bool InterruptRequested() {
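The Throw/ThrowAt reshuffle earlier in this header keeps the usual convention in this code base: a throwing helper records the pending exception and hands back a sentinel (the exception roots object, or an empty MaybeHandle) that the caller simply returns. The toy below sketches that calling convention with invented types, not the real Object/MaybeHandle machinery.

// Illustrative sketch: ToyIsolate, ToyValue and the sentinel are invented;
// only the "return what Throw() returns" convention is the point.
#include <iostream>
#include <string>

struct ToyValue {
  bool is_exception_sentinel = false;
  int number = 0;
};

struct ToyIsolate {
  bool has_pending_exception = false;
  std::string pending_message;

  ToyValue Throw(const std::string& message) {
    has_pending_exception = true;
    pending_message = message;
    return ToyValue{/*is_exception_sentinel=*/true, 0};  // the sentinel
  }
};

ToyValue Divide(ToyIsolate* isolate, int a, int b) {
  if (b == 0) return isolate->Throw("division by zero");  // propagate sentinel
  return ToyValue{false, a / b};
}

int main() {
  ToyIsolate isolate;
  ToyValue ok = Divide(&isolate, 10, 2);
  std::cout << ok.number << "\n";  // 5
  ToyValue failed = Divide(&isolate, 1, 0);
  std::cout << failed.is_exception_sentinel << " "
            << isolate.pending_message << "\n";  // 1 division by zero
}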
diff --git a/deps/v8/src/execution/local-isolate-inl.h b/deps/v8/src/execution/local-isolate-inl.h
index 3f61f6716c..318cc10fa4 100644
--- a/deps/v8/src/execution/local-isolate-inl.h
+++ b/deps/v8/src/execution/local-isolate-inl.h
@@ -13,11 +13,11 @@ namespace v8 {
namespace internal {
Address LocalIsolate::isolate_root() const { return isolate_->isolate_root(); }
-ReadOnlyHeap* LocalIsolate::read_only_heap() {
+ReadOnlyHeap* LocalIsolate::read_only_heap() const {
return isolate_->read_only_heap();
}
-Object LocalIsolate::root(RootIndex index) {
+Object LocalIsolate::root(RootIndex index) const {
DCHECK(RootsTable::IsImmortalImmovable(index));
return isolate_->root(index);
}
diff --git a/deps/v8/src/execution/local-isolate.cc b/deps/v8/src/execution/local-isolate.cc
index bba871c35b..77733907f8 100644
--- a/deps/v8/src/execution/local-isolate.cc
+++ b/deps/v8/src/execution/local-isolate.cc
@@ -12,12 +12,15 @@
namespace v8 {
namespace internal {
-LocalIsolate::LocalIsolate(Isolate* isolate)
+LocalIsolate::LocalIsolate(Isolate* isolate, ThreadKind kind)
: HiddenLocalFactory(isolate),
- heap_(isolate->heap()),
+ heap_(isolate->heap(), kind),
isolate_(isolate),
logger_(new LocalLogger(isolate)),
- thread_id_(ThreadId::Current()) {}
+ thread_id_(ThreadId::Current()),
+ stack_limit_(kind == ThreadKind::kMain
+ ? isolate->stack_guard()->real_climit()
+ : GetCurrentStackPosition() - FLAG_stack_size * KB) {}
LocalIsolate::~LocalIsolate() = default;
@@ -29,10 +32,15 @@ int LocalIsolate::GetNextUniqueSharedFunctionInfoId() {
}
#endif // V8_SFI_HAS_UNIQUE_ID
-bool LocalIsolate::is_collecting_type_profile() {
+bool LocalIsolate::is_collecting_type_profile() const {
// TODO(leszeks): Figure out if it makes sense to check this asynchronously.
return isolate_->is_collecting_type_profile();
}
+// static
+bool StackLimitCheck::HasOverflowed(LocalIsolate* local_isolate) {
+ return GetCurrentStackPosition() < local_isolate->stack_limit();
+}
+
} // namespace internal
} // namespace v8
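StackLimitCheck::HasOverflowed(LocalIsolate*) above compares the current stack position against a limit the LocalIsolate captured at construction: the main thread inherits the stack guard's real climit, background threads take their own starting position minus FLAG_stack_size. The standalone sketch below approximates the stack position with the address of a local variable and uses an invented 64 KB budget; it assumes a downward-growing stack, which is what the subtraction in the constructor assumes too.

// Illustrative sketch: GetStackPositionSketch and the 64 KB budget are
// inventions; only the "position < limit means overflow" comparison mirrors
// StackLimitCheck::HasOverflowed.
#include <cstdint>
#include <iostream>

uintptr_t GetStackPositionSketch() {
  int marker = 0;
  return reinterpret_cast<uintptr_t>(&marker);  // rough current stack position
}

bool HasOverflowed(uintptr_t stack_limit) {
  return GetStackPositionSketch() < stack_limit;  // stack grows downwards
}

int CountDepth(uintptr_t stack_limit, int depth) {
  if (HasOverflowed(stack_limit)) return depth;  // stop before blowing the budget
  volatile char padding[1024];                   // consume some stack per frame
  padding[0] = 0;
  int result = CountDepth(stack_limit, depth + 1);
  padding[0] = static_cast<char>(result);        // keep the frame alive
  return result;
}

int main() {
  const uintptr_t limit = GetStackPositionSketch() - 64 * 1024;  // 64 KB budget
  std::cout << "recursed " << CountDepth(limit, 0)
            << " frames within the budget\n";
}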
diff --git a/deps/v8/src/execution/local-isolate.h b/deps/v8/src/execution/local-isolate.h
index 1420ae7311..7cfa156fb7 100644
--- a/deps/v8/src/execution/local-isolate.h
+++ b/deps/v8/src/execution/local-isolate.h
@@ -36,7 +36,7 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
public:
using HandleScopeType = LocalHandleScope;
- explicit LocalIsolate(Isolate* isolate);
+ explicit LocalIsolate(Isolate* isolate, ThreadKind kind);
~LocalIsolate();
// Kinda sketchy.
@@ -48,12 +48,10 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
LocalHeap* heap() { return &heap_; }
inline Address isolate_root() const;
- inline ReadOnlyHeap* read_only_heap();
- inline Object root(RootIndex index);
+ inline ReadOnlyHeap* read_only_heap() const;
+ inline Object root(RootIndex index) const;
- StringTable* string_table() { return isolate_->string_table(); }
-
- const Isolate* GetIsolateForPtrCompr() const { return isolate_; }
+ StringTable* string_table() const { return isolate_->string_table(); }
v8::internal::LocalFactory* factory() {
// Upcast to the privately inherited base-class using c-style casts to avoid
@@ -77,10 +75,11 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
int GetNextUniqueSharedFunctionInfoId();
#endif // V8_SFI_HAS_UNIQUE_ID
- bool is_collecting_type_profile();
+ bool is_collecting_type_profile() const;
- LocalLogger* logger() { return logger_.get(); }
- ThreadId thread_id() { return thread_id_; }
+ LocalLogger* logger() const { return logger_.get(); }
+ ThreadId thread_id() const { return thread_id_; }
+ Address stack_limit() const { return stack_limit_; }
private:
friend class v8::internal::LocalFactory;
@@ -89,10 +88,11 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
// TODO(leszeks): Extract out the fields of the Isolate we want and store
// those instead of the whole thing.
- Isolate* isolate_;
+ Isolate* const isolate_;
std::unique_ptr<LocalLogger> logger_;
- ThreadId thread_id_;
+ ThreadId const thread_id_;
+ Address const stack_limit_;
};
} // namespace internal
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index ab6c6bc392..ea31dc3374 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -514,26 +514,6 @@ int JSStackFrame::GetColumnNumber() {
return kNone;
}
-int JSStackFrame::GetEnclosingLineNumber() {
- if (HasScript()) {
- Handle<SharedFunctionInfo> shared = handle(function_->shared(), isolate_);
- return Script::GetLineNumber(GetScript(),
- shared->function_token_position()) + 1;
- } else {
- return kNone;
- }
-}
-
-int JSStackFrame::GetEnclosingColumnNumber() {
- if (HasScript()) {
- Handle<SharedFunctionInfo> shared = handle(function_->shared(), isolate_);
- return Script::GetColumnNumber(GetScript(),
- shared->function_token_position()) + 1;
- } else {
- return kNone;
- }
-}
-
int JSStackFrame::GetPromiseIndex() const {
return (is_promise_all_ || is_promise_any_) ? offset_ : kNone;
}
@@ -622,12 +602,6 @@ int WasmStackFrame::GetPosition() const {
int WasmStackFrame::GetColumnNumber() { return GetModuleOffset(); }
-int WasmStackFrame::GetEnclosingColumnNumber() {
- const int function_offset =
- GetWasmFunctionOffset(wasm_instance_->module(), wasm_func_index_);
- return function_offset;
-}
-
int WasmStackFrame::GetModuleOffset() const {
const int function_offset =
GetWasmFunctionOffset(wasm_instance_->module(), wasm_func_index_);
@@ -698,26 +672,6 @@ int AsmJsWasmStackFrame::GetColumnNumber() {
return Script::GetColumnNumber(script, GetPosition()) + 1;
}
-int AsmJsWasmStackFrame::GetEnclosingLineNumber() {
- DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->module_object().script(), isolate_);
- DCHECK(script->IsUserJavaScript());
- int byte_offset = GetSourcePosition(wasm_instance_->module(),
- wasm_func_index_, 0,
- is_at_number_conversion_);
- return Script::GetLineNumber(script, byte_offset) + 1;
-}
-
-int AsmJsWasmStackFrame::GetEnclosingColumnNumber() {
- DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->module_object().script(), isolate_);
- DCHECK(script->IsUserJavaScript());
- int byte_offset = GetSourcePosition(wasm_instance_->module(),
- wasm_func_index_, 0,
- is_at_number_conversion_);
- return Script::GetColumnNumber(script, byte_offset) + 1;
-}
-
FrameArrayIterator::FrameArrayIterator(Isolate* isolate,
Handle<FrameArray> array, int frame_ix)
: isolate_(isolate), array_(array), frame_ix_(frame_ix) {}
@@ -1280,7 +1234,18 @@ Handle<String> BuildDefaultCallSite(Isolate* isolate, Handle<Object> object) {
builder.AppendString(Object::TypeOf(isolate, object));
if (object->IsString()) {
builder.AppendCString(" \"");
- builder.AppendString(Handle<String>::cast(object));
+ Handle<String> string = Handle<String>::cast(object);
+ // This threshold must be sufficiently far below String::kMaxLength that
+ // the {builder}'s result can never exceed that limit.
+ constexpr int kMaxPrintedStringLength = 100;
+ if (string->length() <= kMaxPrintedStringLength) {
+ builder.AppendString(string);
+ } else {
+ string = isolate->factory()->NewProperSubString(string, 0,
+ kMaxPrintedStringLength);
+ builder.AppendString(string);
+ builder.AppendCString("<...>");
+ }
builder.AppendCString("\"");
} else if (object->IsNull(isolate)) {
builder.AppendCString(" ");
@@ -1337,13 +1302,12 @@ MessageTemplate UpdateErrorTemplate(CallPrinter::ErrorHint hint,
case CallPrinter::ErrorHint::kNone:
return default_id;
}
- return default_id;
}
} // namespace
-Handle<Object> ErrorUtils::NewIteratorError(Isolate* isolate,
- Handle<Object> source) {
+Handle<JSObject> ErrorUtils::NewIteratorError(Isolate* isolate,
+ Handle<Object> source) {
MessageLocation location;
CallPrinter::ErrorHint hint = CallPrinter::kNone;
Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
@@ -1387,13 +1351,13 @@ Object ErrorUtils::ThrowSpreadArgError(Isolate* isolate, MessageTemplate id,
}
}
- Handle<Object> exception =
- isolate->factory()->NewTypeError(id, callsite, object);
- return isolate->Throw(*exception, &location);
+ isolate->ThrowAt(isolate->factory()->NewTypeError(id, callsite, object),
+ &location);
+ return ReadOnlyRoots(isolate).exception();
}
-Handle<Object> ErrorUtils::NewCalledNonCallableError(Isolate* isolate,
- Handle<Object> source) {
+Handle<JSObject> ErrorUtils::NewCalledNonCallableError(Isolate* isolate,
+ Handle<Object> source) {
MessageLocation location;
CallPrinter::ErrorHint hint = CallPrinter::kNone;
Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
@@ -1402,7 +1366,7 @@ Handle<Object> ErrorUtils::NewCalledNonCallableError(Isolate* isolate,
return isolate->factory()->NewTypeError(id, callsite);
}
-Handle<Object> ErrorUtils::NewConstructedNonConstructable(
+Handle<JSObject> ErrorUtils::NewConstructedNonConstructable(
Isolate* isolate, Handle<Object> source) {
MessageLocation location;
CallPrinter::ErrorHint hint = CallPrinter::kNone;
@@ -1412,10 +1376,6 @@ Handle<Object> ErrorUtils::NewConstructedNonConstructable(
}
Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
- Handle<Object> object) {
- return ThrowLoadFromNullOrUndefined(isolate, object, MaybeHandle<Object>());
-}
-Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
Handle<Object> object,
MaybeHandle<Object> key) {
DCHECK(object->IsNullOrUndefined());
@@ -1487,7 +1447,7 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
callsite = BuildDefaultCallSite(isolate, object);
}
- Handle<Object> error;
+ Handle<JSObject> error;
Handle<String> property_name;
if (is_destructuring) {
if (maybe_property_name.ToHandle(&property_name)) {
@@ -1511,7 +1471,12 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
}
}
- return isolate->Throw(*error, location_computed ? &location : nullptr);
+ if (location_computed) {
+ isolate->ThrowAt(error, &location);
+ } else {
+ isolate->Throw(*error);
+ }
+ return ReadOnlyRoots(isolate).exception();
}
} // namespace internal
diff --git a/deps/v8/src/execution/messages.h b/deps/v8/src/execution/messages.h
index ad72d762d2..4aab728f7c 100644
--- a/deps/v8/src/execution/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -87,9 +87,6 @@ class StackFrameBase {
// Return 0-based Wasm function index. Returns -1 for non-Wasm frames.
virtual int GetWasmFunctionIndex();
- virtual int GetEnclosingColumnNumber() = 0;
- virtual int GetEnclosingLineNumber() = 0;
-
// Returns the index of the rejected promise in the Promise combinator input,
// or -1 if this frame is not a Promise combinator frame.
virtual int GetPromiseIndex() const = 0;
@@ -136,9 +133,6 @@ class JSStackFrame : public StackFrameBase {
int GetLineNumber() override;
int GetColumnNumber() override;
- int GetEnclosingColumnNumber() override;
- int GetEnclosingLineNumber() override;
-
int GetPromiseIndex() const override;
bool IsNative() override;
@@ -189,8 +183,6 @@ class WasmStackFrame : public StackFrameBase {
int GetPosition() const override;
int GetLineNumber() override { return 0; }
int GetColumnNumber() override;
- int GetEnclosingColumnNumber() override;
- int GetEnclosingLineNumber() override { return 0; }
int GetWasmFunctionIndex() override { return wasm_func_index_; }
int GetPromiseIndex() const override { return GetPosition(); }
@@ -239,9 +231,6 @@ class AsmJsWasmStackFrame : public WasmStackFrame {
int GetLineNumber() override;
int GetColumnNumber() override;
- int GetEnclosingColumnNumber() override;
- int GetEnclosingLineNumber() override;
-
private:
friend class FrameArrayIterator;
AsmJsWasmStackFrame() = default;
@@ -308,16 +297,16 @@ class ErrorUtils : public AllStatic {
Handle<JSObject> error,
Handle<Object> stack_trace);
- static Handle<Object> NewIteratorError(Isolate* isolate,
- Handle<Object> source);
- static Handle<Object> NewCalledNonCallableError(Isolate* isolate,
- Handle<Object> source);
- static Handle<Object> NewConstructedNonConstructable(Isolate* isolate,
- Handle<Object> source);
+ static Handle<JSObject> NewIteratorError(Isolate* isolate,
+ Handle<Object> source);
+ static Handle<JSObject> NewCalledNonCallableError(Isolate* isolate,
+ Handle<Object> source);
+ static Handle<JSObject> NewConstructedNonConstructable(Isolate* isolate,
+ Handle<Object> source);
+ // Returns the Exception sentinel.
static Object ThrowSpreadArgError(Isolate* isolate, MessageTemplate id,
Handle<Object> object);
- static Object ThrowLoadFromNullOrUndefined(Isolate* isolate,
- Handle<Object> object);
+ // Returns the Exception sentinel.
static Object ThrowLoadFromNullOrUndefined(Isolate* isolate,
Handle<Object> object,
MaybeHandle<Object> key);
diff --git a/deps/v8/src/execution/ppc/frame-constants-ppc.h b/deps/v8/src/execution/ppc/frame-constants-ppc.h
index 0931ffe101..d29bd8c450 100644
--- a/deps/v8/src/execution/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/execution/ppc/frame-constants-ppc.h
@@ -15,7 +15,9 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
// Need to take constant pool into account.
- static constexpr int kCallerFPOffset = -4 * kSystemPointerSize;
+ static constexpr int kCallerFPOffset = FLAG_enable_embedded_constant_pool
+ ? -4 * kSystemPointerSize
+ : -3 * kSystemPointerSize;
};
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
diff --git a/deps/v8/src/execution/runtime-profiler.cc b/deps/v8/src/execution/runtime-profiler.cc
index 686fa23751..b7b8f5963c 100644
--- a/deps/v8/src/execution/runtime-profiler.cc
+++ b/deps/v8/src/execution/runtime-profiler.cc
@@ -92,8 +92,20 @@ void TraceHeuristicOptimizationDisallowed(JSFunction function) {
}
}
+// TODO(jgruber): Remove this once we include this tracing with --trace-opt.
+void TraceNCIRecompile(JSFunction function, OptimizationReason reason) {
+ if (FLAG_trace_turbo_nci) {
+ StdoutStream os;
+ os << "NCI tierup mark: " << Brief(function) << ", "
+ << OptimizationReasonToString(reason) << std::endl;
+ }
+}
+
void TraceRecompile(JSFunction function, OptimizationReason reason,
- Isolate* isolate) {
+ CodeKind code_kind, Isolate* isolate) {
+ if (code_kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT) {
+ TraceNCIRecompile(function, reason);
+ }
if (FLAG_trace_opt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[marking ");
@@ -104,22 +116,15 @@ void TraceRecompile(JSFunction function, OptimizationReason reason,
}
}
-void TraceNCIRecompile(JSFunction function, OptimizationReason reason) {
- if (FLAG_trace_turbo_nci) {
- StdoutStream os;
- os << "NCI tierup mark: " << Brief(function) << ", "
- << OptimizationReasonToString(reason) << std::endl;
- }
-}
-
} // namespace
RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
: isolate_(isolate), any_ic_changed_(false) {}
-void RuntimeProfiler::Optimize(JSFunction function, OptimizationReason reason) {
+void RuntimeProfiler::Optimize(JSFunction function, OptimizationReason reason,
+ CodeKind code_kind) {
DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
- TraceRecompile(function, reason, isolate_);
+ TraceRecompile(function, reason, code_kind, isolate_);
function.MarkForOptimization(ConcurrencyMode::kConcurrent);
}
@@ -150,43 +155,15 @@ void RuntimeProfiler::AttemptOnStackReplacement(InterpretedFrame* frame,
Min(level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker));
}
-void RuntimeProfiler::MaybeOptimizeInterpretedFrame(JSFunction function,
- InterpretedFrame* frame) {
+void RuntimeProfiler::MaybeOptimizeFrame(JSFunction function,
+ JavaScriptFrame* frame,
+ CodeKind code_kind) {
+ DCHECK(CodeKindCanTierUp(code_kind));
if (function.IsInOptimizationQueue()) {
TraceInOptimizationQueue(function);
return;
}
- if (FLAG_testing_d8_test_runner &&
- !PendingOptimizationTable::IsHeuristicOptimizationAllowed(isolate_,
- function)) {
- TraceHeuristicOptimizationDisallowed(function);
- return;
- }
-
- if (function.shared().optimization_disabled()) return;
-
- if (FLAG_always_osr) {
- AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
- // Fall through and do a normal optimized compile as well.
- } else if (MaybeOSR(function, frame)) {
- return;
- }
- OptimizationReason reason =
- ShouldOptimize(function, function.shared().GetBytecodeArray());
-
- if (reason != OptimizationReason::kDoNotOptimize) {
- Optimize(function, reason);
- }
-}
-
-void RuntimeProfiler::MaybeOptimizeNCIFrame(JSFunction function) {
- DCHECK_EQ(function.code().kind(), CodeKind::NATIVE_CONTEXT_INDEPENDENT);
-
- if (function.IsInOptimizationQueue()) {
- TraceInOptimizationQueue(function);
- return;
- }
if (FLAG_testing_d8_test_runner &&
!PendingOptimizationTable::IsHeuristicOptimizationAllowed(isolate_,
function)) {
@@ -196,15 +173,24 @@ void RuntimeProfiler::MaybeOptimizeNCIFrame(JSFunction function) {
if (function.shared().optimization_disabled()) return;
- // Note: We currently do not trigger OSR compilation from NCI code.
+ // Note: We currently do not trigger OSR compilation from NCI or TP code.
// TODO(jgruber,v8:8888): But we should.
+ if (frame->is_interpreted()) {
+ DCHECK_EQ(code_kind, CodeKind::INTERPRETED_FUNCTION);
+ if (FLAG_always_osr) {
+ AttemptOnStackReplacement(InterpretedFrame::cast(frame),
+ AbstractCode::kMaxLoopNestingMarker);
+ // Fall through and do a normal optimized compile as well.
+ } else if (MaybeOSR(function, InterpretedFrame::cast(frame))) {
+ return;
+ }
+ }
OptimizationReason reason =
ShouldOptimize(function, function.shared().GetBytecodeArray());
if (reason != OptimizationReason::kDoNotOptimize) {
- TraceNCIRecompile(function, reason);
- Optimize(function, reason);
+ Optimize(function, reason, code_kind);
}
}
@@ -224,6 +210,9 @@ bool RuntimeProfiler::MaybeOSR(JSFunction function, InterpretedFrame* frame) {
function.HasAvailableOptimizedCode()) {
    // Attempt OSR if we are still running interpreted code even though the
    // function has long been marked or even already been optimized.
+    // TODO(turboprop, mythria): Currently we don't tier up from Turboprop code
+    // to Turbofan OSR code. When we start supporting this, the ticks will have
+    // to be scaled accordingly.
int64_t allowance =
kOSRBytecodeSizeAllowanceBase +
static_cast<int64_t>(ticks) * kOSRBytecodeSizeAllowancePerTick;
@@ -240,22 +229,31 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction function,
if (function.ActiveTierIsTurbofan()) {
return OptimizationReason::kDoNotOptimize;
}
+ if (V8_UNLIKELY(FLAG_turboprop) && function.ActiveTierIsToptierTurboprop()) {
+ return OptimizationReason::kDoNotOptimize;
+ }
int ticks = function.feedback_vector().profiler_ticks();
+ int scale_factor = function.ActiveTierIsMidtierTurboprop()
+ ? FLAG_ticks_scale_factor_for_top_tier
+ : 1;
int ticks_for_optimization =
kProfilerTicksBeforeOptimization +
(bytecode.length() / kBytecodeSizeAllowancePerTick);
+ ticks_for_optimization *= scale_factor;
if (ticks >= ticks_for_optimization) {
return OptimizationReason::kHotAndStable;
} else if (!any_ic_changed_ &&
bytecode.length() < kMaxBytecodeSizeForEarlyOpt) {
+    // TODO(turboprop, mythria): Do we need to support small function
+    // optimization for TP->TF tier up? If so, do we want to scale the bytecode
+    // size?
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
return OptimizationReason::kSmallFunction;
} else if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
function.PrintName();
- PrintF(", not enough ticks: %d/%d and ", ticks,
- kProfilerTicksBeforeOptimization);
+ PrintF(", not enough ticks: %d/%d and ", ticks, ticks_for_optimization);
if (any_ic_changed_) {
PrintF("ICs changed]\n");
} else {
@@ -293,7 +291,7 @@ void RuntimeProfiler::MarkCandidatesForOptimizationFromBytecode() {
if (!function.has_feedback_vector()) continue;
- MaybeOptimizeInterpretedFrame(function, InterpretedFrame::cast(frame));
+ MaybeOptimizeFrame(function, frame, CodeKind::INTERPRETED_FUNCTION);
// TODO(leszeks): Move this increment to before the maybe optimize checks,
// and update the tests to assume the increment has already happened.
@@ -311,7 +309,8 @@ void RuntimeProfiler::MarkCandidatesForOptimizationFromCode() {
if (!frame->is_optimized()) continue;
JSFunction function = frame->function();
- if (function.code().kind() != CodeKind::NATIVE_CONTEXT_INDEPENDENT) {
+ auto code_kind = function.code().kind();
+ if (!CodeKindIsOptimizedAndCanTierUp(code_kind)) {
continue;
}
@@ -320,7 +319,7 @@ void RuntimeProfiler::MarkCandidatesForOptimizationFromCode() {
function.feedback_vector().SaturatingIncrementProfilerTicks();
- MaybeOptimizeNCIFrame(function);
+ MaybeOptimizeFrame(function, frame, code_kind);
}
}
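For reference, the new tier-up condition reduces to a scaled threshold check; the sketch below restates that arithmetic with placeholder constants (the real values live in the V8 flags and profiler constants, not here):

// Sketch of the tier-up decision: the tick threshold grows with bytecode size
// and is multiplied by a scale factor for mid-tier Turboprop code, whose much
// smaller interrupt budget would otherwise tier up to Turbofan far too early.
bool ShouldTierUp(int ticks, int bytecode_length, bool midtier_turboprop) {
  constexpr int kProfilerTicksBeforeOptimization = 3;  // placeholder value
  constexpr int kBytecodeSizeAllowancePerTick = 1200;  // placeholder value
  constexpr int kTicksScaleFactorForTopTier = 10;      // matches the new flag default
  int threshold = kProfilerTicksBeforeOptimization +
                  bytecode_length / kBytecodeSizeAllowancePerTick;
  if (midtier_turboprop) threshold *= kTicksScaleFactorForTopTier;
  return ticks >= threshold;
}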
diff --git a/deps/v8/src/execution/runtime-profiler.h b/deps/v8/src/execution/runtime-profiler.h
index d7125ef73c..b4207d03f9 100644
--- a/deps/v8/src/execution/runtime-profiler.h
+++ b/deps/v8/src/execution/runtime-profiler.h
@@ -15,7 +15,9 @@ namespace internal {
class BytecodeArray;
class Isolate;
class InterpretedFrame;
+class JavaScriptFrame;
class JSFunction;
+enum class CodeKind;
enum class OptimizationReason : uint8_t;
class RuntimeProfiler {
@@ -35,15 +37,16 @@ class RuntimeProfiler {
private:
// Make the decision whether to optimize the given function, and mark it for
// optimization if the decision was 'yes'.
- void MaybeOptimizeNCIFrame(JSFunction function);
- void MaybeOptimizeInterpretedFrame(JSFunction function,
- InterpretedFrame* frame);
+ void MaybeOptimizeFrame(JSFunction function, JavaScriptFrame* frame,
+ CodeKind code_kind);
+
  // Potentially attempts OSR from the given frame and returns whether no
  // other optimization attempts should be made.
bool MaybeOSR(JSFunction function, InterpretedFrame* frame);
OptimizationReason ShouldOptimize(JSFunction function,
BytecodeArray bytecode_array);
- void Optimize(JSFunction function, OptimizationReason reason);
+ void Optimize(JSFunction function, OptimizationReason reason,
+ CodeKind code_kind);
void Baseline(JSFunction function, OptimizationReason reason);
class MarkCandidatesForOptimizationScope final {
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 3c30c87583..a9fc318e4b 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -3405,9 +3405,10 @@ EVALUATE(VPKLS) {
template <class S, class D>
void VectorUnpackHigh(void* dst, void* src) {
+ constexpr size_t kItemCount = kSimd128Size / sizeof(D);
D value = 0;
- for (size_t i = 0; i < kSimd128Size / sizeof(D); i++) {
- value = *(reinterpret_cast<S*>(src) + i);
+ for (size_t i = 0; i < kItemCount; i++) {
+ value = *(reinterpret_cast<S*>(src) + i + kItemCount);
memcpy(reinterpret_cast<D*>(dst) + i, &value, sizeof(D));
}
}
@@ -3462,11 +3463,14 @@ EVALUATE(VUPLH) {
template <class S, class D>
void VectorUnpackLow(void* dst, void* src) {
- D value = 0;
- size_t count = kSimd128Size / sizeof(D);
- for (size_t i = 0; i < count; i++) {
- value = *(reinterpret_cast<S*>(src) + i + count);
- memcpy(reinterpret_cast<D*>(dst) + i, &value, sizeof(D));
+ constexpr size_t kItemCount = kSimd128Size / sizeof(D);
+ D temps[kItemCount] = {0};
+  // Use temporaries to avoid overwriting src when src and dst alias.
+ for (size_t i = 0; i < kItemCount; i++) {
+ temps[i] = static_cast<D>(*(reinterpret_cast<S*>(src) + i));
+ }
+ for (size_t i = 0; i < kItemCount; i++) {
+ memcpy(reinterpret_cast<D*>(dst) + i, &temps[i], sizeof(D));
}
}
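The VectorUnpackLow change reads every source lane into a temporary buffer before any write, so the result stays correct when dst and src refer to the same simulated register. A self-contained sketch of that copy-through-temporaries pattern (plain C++, not the simulator types):

#include <cstddef>
#include <cstring>

// Widen kBytes/sizeof(D) lanes of type S from `src` into lanes of type D in
// `dst`. Staging the widened values in `temps` keeps the loop correct even
// when dst and src alias the same 16-byte storage.
template <class S, class D, size_t kBytes = 16>
void UnpackLowAliasSafe(void* dst, const void* src) {
  constexpr size_t kItemCount = kBytes / sizeof(D);
  D temps[kItemCount] = {};
  for (size_t i = 0; i < kItemCount; i++) {
    S lane;
    std::memcpy(&lane, static_cast<const S*>(src) + i, sizeof(S));
    temps[i] = static_cast<D>(lane);
  }
  std::memcpy(dst, temps, sizeof(temps));
}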
@@ -3742,15 +3746,14 @@ EVALUATE(VPERM) {
USE(m6);
for (int i = 0; i < kSimd128Size; i++) {
int8_t lane_num = get_simd_register_by_lane<int8_t>(r4, i);
+ // Get the five least significant bits.
+ lane_num = (lane_num << 3) >> 3;
int reg = r2;
if (lane_num >= kSimd128Size) {
lane_num = lane_num - kSimd128Size;
reg = r3;
}
- int8_t result = 0;
- if (lane_num >= 0 && lane_num < kSimd128Size * 2) {
- result = get_simd_register_by_lane<int8_t>(reg, lane_num);
- }
+ int8_t result = get_simd_register_by_lane<int8_t>(reg, lane_num);
set_simd_register_by_lane<int8_t>(r1, i, result);
}
return length;
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index fd0cf91333..de288fe2f5 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -82,7 +82,7 @@ void InvokeGC(v8::Isolate* isolate, v8::Isolate::GarbageCollectionType type,
kGCCallbackFlagForced);
break;
case v8::Isolate::GarbageCollectionType::kFullGarbageCollection:
- heap->SetEmbedderStackStateForNextFinalizaton(embedder_stack_state);
+ heap->SetEmbedderStackStateForNextFinalization(embedder_stack_state);
heap->PreciseCollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting,
kGCCallbackFlagForced);
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index ab689283e9..00fcf712d6 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -242,8 +242,6 @@ DEFINE_BOOL(es_staging, false,
DEFINE_BOOL(harmony, false, "enable all completed harmony features")
DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
-// Enabling import.meta requires to also enable import()
-DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
// Enabling FinalizationRegistry#cleanupSome also enables weak refs
DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
@@ -254,7 +252,8 @@ DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
V(harmony_regexp_sequence, "RegExp Unicode sequence properties") \
V(harmony_weak_refs_with_cleanup_some, \
"harmony weak references with FinalizationRegistry.prototype.cleanupSome") \
- V(harmony_regexp_match_indices, "harmony regexp match indices")
+ V(harmony_regexp_match_indices, "harmony regexp match indices") \
+ V(harmony_import_assertions, "harmony import assertions")
#ifdef V8_INTL_SUPPORT
#define HARMONY_INPROGRESS(V) \
@@ -279,13 +278,8 @@ DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
// Features that are shipping (turned on by default, but internal flag remains).
#define HARMONY_SHIPPING_BASE(V) \
- V(harmony_namespace_exports, \
- "harmony namespace exports (export * as foo from 'bar')") \
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_atomics, "harmony atomics") \
- V(harmony_import_meta, "harmony import.meta property") \
- V(harmony_dynamic_import, "harmony dynamic import") \
- V(harmony_promise_all_settled, "harmony Promise.allSettled") \
V(harmony_promise_any, "harmony Promise.any") \
V(harmony_private_methods, "harmony private methods in class literals") \
V(harmony_weak_refs, "harmony weak references") \
@@ -365,6 +359,10 @@ DEFINE_IMPLICATION(lite_mode, optimize_for_size)
#define V8_ENABLE_THIRD_PARTY_HEAP_BOOL false
#endif
+DEFINE_NEG_IMPLICATION(enable_third_party_heap, inline_new)
+DEFINE_NEG_IMPLICATION(enable_third_party_heap, allocation_site_pretenuring)
+DEFINE_NEG_IMPLICATION(enable_third_party_heap, turbo_allocation_folding)
+
DEFINE_BOOL_READONLY(enable_third_party_heap, V8_ENABLE_THIRD_PARTY_HEAP_BOOL,
"Use third-party heap")
@@ -425,6 +423,7 @@ DEFINE_BOOL(future, FUTURE_BOOL,
DEFINE_WEAK_IMPLICATION(future, write_protect_code_memory)
DEFINE_WEAK_IMPLICATION(future, finalize_streaming_on_background)
+DEFINE_WEAK_IMPLICATION(future, super_ic)
// Flags for jitless
DEFINE_BOOL(jitless, V8_LITE_BOOL,
@@ -449,6 +448,10 @@ DEFINE_NEG_IMPLICATION(jitless, interpreted_frames_native_stack)
DEFINE_BOOL(assert_types, false,
"generate runtime type assertions to test the typer")
+DEFINE_BOOL(trace_code_dependencies, false, "trace code dependencies")
+// Depend on --trace-deopt-verbose for reporting dependency invalidations.
+DEFINE_IMPLICATION(trace_code_dependencies, trace_deopt_verbose)
+
// Flags for experimental implementation features.
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
@@ -548,15 +551,24 @@ DEFINE_BOOL(trace_generalization, false, "trace map generalization")
// Flags for TurboProp.
DEFINE_BOOL(turboprop, false, "enable experimental turboprop mid-tier compiler")
-DEFINE_BOOL(turboprop_mid_tier_reg_alloc, false,
- "enable experimental mid-tier register allocator")
-DEFINE_NEG_IMPLICATION(turboprop, turbo_inlining)
+DEFINE_BOOL(turboprop_mid_tier_reg_alloc, true,
+ "enable mid-tier register allocator for turboprop")
+DEFINE_BOOL(turboprop_dynamic_map_checks, false,
+ "use dynamic map checks when generating code for property accesses "
+ "if all handlers in an IC are the same for turboprop")
+DEFINE_BOOL(turboprop_as_midtier, false,
+ "enable experimental turboprop mid-tier compiler")
+DEFINE_IMPLICATION(turboprop_as_midtier, turboprop)
DEFINE_IMPLICATION(turboprop, concurrent_inlining)
DEFINE_VALUE_IMPLICATION(turboprop, interrupt_budget, 15 * KB)
DEFINE_VALUE_IMPLICATION(turboprop, reuse_opt_code_count, 2)
-DEFINE_IMPLICATION(turboprop, dynamic_map_checks)
DEFINE_UINT_READONLY(max_minimorphic_map_checks, 4,
"max number of map checks to perform in minimorphic state")
+// Since Turboprop uses a much lower interrupt budget, we need to wait for a
+// higher number of ticks before tiering up to Turbofan to roughly match the
+// default. The default of 10 is approximately the ratio of the TP to TF
+// interrupt budget.
+DEFINE_INT(ticks_scale_factor_for_top_tier, 10,
+ "scale factor for profiler ticks when tiering up from midtier")
// Flags for concurrent recompilation.
DEFINE_BOOL(concurrent_recompilation, true,
@@ -723,9 +735,6 @@ DEFINE_BOOL(
DEFINE_BOOL(turbo_fast_api_calls, false, "enable fast API calls from TurboFan")
DEFINE_INT(reuse_opt_code_count, 0,
"don't discard optimized code for the specified number of deopts.")
-DEFINE_BOOL(dynamic_map_checks, false,
- "use dynamic map checks when generating code for property accesses "
- "if all handlers in an IC are the same")
// Native context independent (NCI) code.
DEFINE_BOOL(turbo_nci, false,
@@ -739,6 +748,18 @@ DEFINE_BOOL(print_nci_code, false, "print native context independent code.")
DEFINE_BOOL(trace_turbo_nci, false, "trace native context independent code.")
DEFINE_BOOL(turbo_collect_feedback_in_generic_lowering, true,
"enable experimental feedback collection in generic lowering.")
+// TODO(jgruber,v8:8888): Remove this flag once we've settled on a codegen
+// strategy.
+DEFINE_BOOL(turbo_nci_delayed_codegen, true,
+ "delay NCI codegen to reduce useless compilation work.")
+// TODO(jgruber,v8:8888): Remove this flag once we've settled on an ageing
+// strategy.
+DEFINE_BOOL(turbo_nci_cache_ageing, false,
+ "enable ageing of the NCI code cache.")
+// TODO(jgruber,v8:8888): Remove this flag once we've settled on an ageing
+// strategy.
+DEFINE_BOOL(isolate_script_cache_ageing, true,
+ "enable ageing of the isolate script cache.")
// Favor memory over execution speed.
DEFINE_BOOL(optimize_for_size, false,
@@ -793,7 +814,7 @@ DEFINE_INT(trace_wasm_ast_start, 0,
DEFINE_INT(trace_wasm_ast_end, 0, "end function for wasm AST trace (exclusive)")
DEFINE_BOOL(liftoff, true,
"enable Liftoff, the baseline compiler for WebAssembly")
-DEFINE_BOOL(liftoff_extern_ref, false,
+DEFINE_BOOL(experimental_liftoff_extern_ref, false,
"enable support for externref in Liftoff")
// We can't tier up (from Liftoff to TurboFan) in single-threaded mode, hence
// disable Liftoff in that configuration for now. The alternative is disabling
@@ -846,7 +867,7 @@ DEFINE_BOOL(wasm_staging, false, "enable staged wasm features")
FOREACH_WASM_STAGING_FEATURE_FLAG(WASM_STAGING_IMPLICATION)
#undef WASM_STAGING_IMPLICATION
-DEFINE_BOOL(wasm_opt, false, "enable wasm optimization")
+DEFINE_BOOL(wasm_opt, true, "enable wasm optimization")
DEFINE_BOOL(
wasm_bounds_checks, true,
"enable bounds checks (disable for performance testing only)")
@@ -861,8 +882,10 @@ DEFINE_BOOL(wasm_trap_handler, true,
DEFINE_BOOL(wasm_fuzzer_gen_test, false,
"generate a test case when running a wasm fuzzer")
DEFINE_IMPLICATION(wasm_fuzzer_gen_test, single_threaded)
-DEFINE_BOOL(print_wasm_code, false, "Print WebAssembly code")
-DEFINE_BOOL(print_wasm_stub_code, false, "Print WebAssembly stub code")
+DEFINE_BOOL(print_wasm_code, false, "print WebAssembly code")
+DEFINE_INT(print_wasm_code_function_index, -1,
+ "print WebAssembly code for function at index")
+DEFINE_BOOL(print_wasm_stub_code, false, "print WebAssembly stub code")
DEFINE_BOOL(asm_wasm_lazy_compilation, false,
"enable lazy compilation for asm-wasm modules")
DEFINE_IMPLICATION(validate_asm, asm_wasm_lazy_compilation)
@@ -873,10 +896,6 @@ DEFINE_DEBUG_BOOL(trace_wasm_lazy_compilation, false,
DEFINE_BOOL(wasm_lazy_validation, false,
"enable lazy validation for lazily compiled wasm functions")
-// Flags for wasm prototyping that are not strictly features i.e., part of
-// an existing proposal that may be conditionally enabled.
-DEFINE_BOOL(wasm_atomics_on_non_shared_memory, true,
- "allow atomic operations on non-shared WebAssembly memory")
DEFINE_BOOL(wasm_grow_shared_memory, true,
"allow growing shared WebAssembly memory objects")
DEFINE_BOOL(wasm_simd_post_mvp, false,
@@ -990,7 +1009,7 @@ DEFINE_BOOL(scavenge_separate_stack_scanning, false,
"use a separate phase for stack scanning in scavenge")
DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
DEFINE_BOOL(write_protect_code_memory, true, "write protect code memory")
-#ifdef V8_CONCURRENT_MARKING
+#if defined(V8_ATOMIC_MARKING_STATE) && defined(V8_ATOMIC_OBJECT_FIELD_WRITES)
#define V8_CONCURRENT_MARKING_BOOL true
#else
#define V8_CONCURRENT_MARKING_BOOL false
@@ -1388,6 +1407,13 @@ DEFINE_BOOL(log_colour, ENABLE_LOG_COLOUR,
DEFINE_BOOL(trace_sim_messages, false,
"Trace simulator debug messages. Implied by --trace-sim.")
+#if defined V8_TARGET_ARCH_ARM64
+// pointer-auth-arm64.cc
+DEFINE_DEBUG_BOOL(sim_abort_on_bad_auth, false,
+ "Stop execution when a pointer authentication fails in the "
+ "ARM64 simulator.")
+#endif
+
// isolate.cc
DEFINE_BOOL(async_stack_traces, true,
"include async stack traces in Error.stack")
@@ -1443,13 +1469,6 @@ DEFINE_BOOL(profile_deserialization, false,
"Print the time it takes to deserialize the snapshot.")
DEFINE_BOOL(serialization_statistics, false,
"Collect statistics on serialized objects.")
-#ifdef V8_ENABLE_THIRD_PARTY_HEAP
-DEFINE_UINT_READONLY(serialization_chunk_size, 1,
- "Custom size for serialization chunks")
-#else
-DEFINE_UINT(serialization_chunk_size, 4096,
- "Custom size for serialization chunks")
-#endif
// Regexp
DEFINE_BOOL(regexp_optimization, true, "generate optimized regexp code")
DEFINE_BOOL(regexp_mode_modifiers, false, "enable inline flags in regexp.")
@@ -1476,10 +1495,22 @@ DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing")
DEFINE_BOOL(trace_regexp_tier_up, false, "trace regexp tiering up execution")
DEFINE_BOOL(enable_experimental_regexp_engine, false,
- "enable experimental linear time regexp engine")
+ "recognize regexps with 'l' flag, run them on experimental engine")
+DEFINE_BOOL(default_to_experimental_regexp_engine, false,
+ "run regexps with the experimental engine where possible")
+DEFINE_IMPLICATION(default_to_experimental_regexp_engine,
+ enable_experimental_regexp_engine)
DEFINE_BOOL(trace_experimental_regexp_engine, false,
"trace execution of experimental regexp engine")
+DEFINE_BOOL(enable_experimental_regexp_engine_on_excessive_backtracks, false,
+ "fall back to a breadth-first regexp engine on excessive "
+ "backtracking")
+DEFINE_UINT(regexp_backtracks_before_fallback, 50000,
+            "number of backtracks during regexp execution before falling "
+            "back to the experimental engine if "
+ "enable_experimental_regexp_engine_on_excessive_backtracks is set")
+
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_BOOL(testing_bool_flag, true, "testing_bool_flag")
DEFINE_MAYBE_BOOL(testing_maybe_bool_flag, "testing_maybe_bool_flag")
@@ -1520,6 +1551,11 @@ DEFINE_STRING(turbo_profiling_log_file, nullptr,
"Path of the input file containing basic block counters for "
"builtins. (mksnapshot only)")
+// On some platforms, the .text section only has execute permissions.
+DEFINE_BOOL(text_is_readable, true,
+            "Whether the .text section of the binary can be read")
+DEFINE_NEG_NEG_IMPLICATION(text_is_readable, partial_constant_pool)
+
//
// Minor mark compact collector flags.
//
diff --git a/deps/v8/src/handles/DIR_METADATA b/deps/v8/src/handles/DIR_METADATA
new file mode 100644
index 0000000000..ff55846b31
--- /dev/null
+++ b/deps/v8/src/handles/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>GC"
+} \ No newline at end of file
diff --git a/deps/v8/src/handles/OWNERS b/deps/v8/src/handles/OWNERS
index 75a534ce98..4df0a2548a 100644
--- a/deps/v8/src/handles/OWNERS
+++ b/deps/v8/src/handles/OWNERS
@@ -3,5 +3,3 @@ ishell@chromium.org
jkummerow@chromium.org
mlippautz@chromium.org
ulan@chromium.org
-
-# COMPONENT: Blink>JavaScript>GC
diff --git a/deps/v8/src/handles/global-handles.cc b/deps/v8/src/handles/global-handles.cc
index 7a91116ac1..1782514d6e 100644
--- a/deps/v8/src/handles/global-handles.cc
+++ b/deps/v8/src/handles/global-handles.cc
@@ -5,6 +5,7 @@
#include "src/handles/global-handles.h"
#include <algorithm>
+#include <cstdint>
#include <map>
#include "src/api/api-inl.h"
@@ -79,9 +80,8 @@ class GlobalHandles::NodeBlock final {
template <class NodeType>
const GlobalHandles::NodeBlock<NodeType>*
GlobalHandles::NodeBlock<NodeType>::From(const NodeType* node) {
- uintptr_t ptr = reinterpret_cast<const uintptr_t>(node) -
- sizeof(NodeType) * node->index();
- const BlockType* block = reinterpret_cast<const BlockType*>(ptr);
+ const NodeType* firstNode = node - node->index();
+ const BlockType* block = reinterpret_cast<const BlockType*>(firstNode);
DCHECK_EQ(node, block->at(node->index()));
return block;
}
@@ -89,9 +89,8 @@ GlobalHandles::NodeBlock<NodeType>::From(const NodeType* node) {
template <class NodeType>
GlobalHandles::NodeBlock<NodeType>* GlobalHandles::NodeBlock<NodeType>::From(
NodeType* node) {
- uintptr_t ptr =
- reinterpret_cast<uintptr_t>(node) - sizeof(NodeType) * node->index();
- BlockType* block = reinterpret_cast<BlockType*>(ptr);
+ NodeType* firstNode = node - node->index();
+ BlockType* block = reinterpret_cast<BlockType*>(firstNode);
DCHECK_EQ(node, block->at(node->index()));
return block;
}
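The two From() hunks above trade byte-level arithmetic for typed pointer arithmetic: stepping back by the node's own index lands on the first node of the block, which is also the block's base address because the node array is the block's first member. A hedged sketch of the idea with simplified types (not the V8 classes):

#include <cassert>
#include <cstddef>

struct Node {
  size_t index;  // position of this node inside its owning block
};

struct Block {
  Node nodes[64];

  // Typed pointer arithmetic: node - node->index is &nodes[0], and since
  // `nodes` is the first member that address is also the Block itself.
  static Block* From(Node* node) {
    Node* first = node - node->index;
    Block* block = reinterpret_cast<Block*>(first);
    assert(&block->nodes[node->index] == node);
    return block;
  }
};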
@@ -381,7 +380,7 @@ namespace {
void ExtractInternalFields(JSObject jsobject, void** embedder_fields, int len) {
int field_count = jsobject.GetEmbedderFieldCount();
- const Isolate* isolate = GetIsolateForPtrCompr(jsobject);
+ IsolateRoot isolate = GetIsolateForPtrCompr(jsobject);
for (int i = 0; i < len; ++i) {
if (field_count == i) break;
void* pointer;
@@ -748,14 +747,10 @@ class GlobalHandles::OnStackTracedNodeSpace final {
void SetStackStart(void* stack_start) {
CHECK(on_stack_nodes_.empty());
- stack_start_ =
- GetStackAddressForSlot(reinterpret_cast<uintptr_t>(stack_start));
+ stack_start_ = base::Stack::GetRealStackAddressForSlot(stack_start);
}
- bool IsOnStack(uintptr_t slot) const {
- const uintptr_t address = GetStackAddressForSlot(slot);
- return stack_start_ >= address && address > GetCurrentStackPosition();
- }
+ V8_INLINE bool IsOnStack(uintptr_t slot) const;
void Iterate(RootVisitor* v);
TracedNode* Acquire(Object value, uintptr_t address);
@@ -772,32 +767,36 @@ class GlobalHandles::OnStackTracedNodeSpace final {
GlobalHandles* global_handles;
};
- uintptr_t GetStackAddressForSlot(uintptr_t slot) const;
-
- // Keeps track of registered handles and their stack address. The data
- // structure is cleaned on iteration and when adding new references using the
- // current stack address.
+ // Keeps track of registered handles. The data structure is cleaned on
+ // iteration and when adding new references using the current stack address.
+  // Cleaning is based on the current stack address and the key of the map,
+  // which is slightly different for ASAN configs -- see below.
+#ifdef V8_USE_ADDRESS_SANITIZER
+ // Mapping from stack slots or real stack frames to the corresponding nodes.
+ // In case a reference is part of a fake frame, we map it to the real stack
+ // frame base instead of the actual stack slot. The list keeps all nodes for
+ // a particular real frame.
+ std::map<uintptr_t, std::list<NodeEntry>> on_stack_nodes_;
+#else // !V8_USE_ADDRESS_SANITIZER
+ // Mapping from stack slots to the corresponding nodes. We don't expect
+ // aliasing with overlapping lifetimes of nodes.
std::map<uintptr_t, NodeEntry> on_stack_nodes_;
+#endif // !V8_USE_ADDRESS_SANITIZER
+
uintptr_t stack_start_ = 0;
GlobalHandles* global_handles_ = nullptr;
size_t acquire_count_ = 0;
};
-uintptr_t GlobalHandles::OnStackTracedNodeSpace::GetStackAddressForSlot(
- uintptr_t slot) const {
+bool GlobalHandles::OnStackTracedNodeSpace::IsOnStack(uintptr_t slot) const {
#ifdef V8_USE_ADDRESS_SANITIZER
- void* fake_stack = __asan_get_current_fake_stack();
- if (fake_stack) {
- void* fake_frame_start;
- void* real_frame = __asan_addr_is_in_fake_stack(
- fake_stack, reinterpret_cast<void*>(slot), &fake_frame_start, nullptr);
- if (real_frame) {
- return reinterpret_cast<uintptr_t>(real_frame) +
- (slot - reinterpret_cast<uintptr_t>(fake_frame_start));
- }
+ if (__asan_addr_is_in_fake_stack(__asan_get_current_fake_stack(),
+ reinterpret_cast<void*>(slot), nullptr,
+ nullptr)) {
+ return true;
}
#endif // V8_USE_ADDRESS_SANITIZER
- return slot;
+ return stack_start_ >= slot && slot > base::Stack::GetCurrentStackPosition();
}
void GlobalHandles::OnStackTracedNodeSpace::NotifyEmptyEmbedderStack() {
@@ -805,6 +804,17 @@ void GlobalHandles::OnStackTracedNodeSpace::NotifyEmptyEmbedderStack() {
}
void GlobalHandles::OnStackTracedNodeSpace::Iterate(RootVisitor* v) {
+#ifdef V8_USE_ADDRESS_SANITIZER
+ for (auto& pair : on_stack_nodes_) {
+ for (auto& node_entry : pair.second) {
+ TracedNode& node = node_entry.node;
+ if (node.IsRetainer()) {
+ v->VisitRootPointer(Root::kGlobalHandles, "on-stack TracedReference",
+ node.location());
+ }
+ }
+ }
+#else // !V8_USE_ADDRESS_SANITIZER
// Handles have been cleaned from the GC entry point which is higher up the
// stack.
for (auto& pair : on_stack_nodes_) {
@@ -814,6 +824,7 @@ void GlobalHandles::OnStackTracedNodeSpace::Iterate(RootVisitor* v) {
node.location());
}
}
+#endif // !V8_USE_ADDRESS_SANITIZER
}
GlobalHandles::TracedNode* GlobalHandles::OnStackTracedNodeSpace::Acquire(
@@ -828,8 +839,14 @@ GlobalHandles::TracedNode* GlobalHandles::OnStackTracedNodeSpace::Acquire(
NodeEntry entry;
entry.node.Free(nullptr);
entry.global_handles = global_handles_;
- auto pair =
- on_stack_nodes_.insert({GetStackAddressForSlot(slot), std::move(entry)});
+#ifdef V8_USE_ADDRESS_SANITIZER
+ auto pair = on_stack_nodes_.insert(
+ {base::Stack::GetRealStackAddressForSlot(slot), {}});
+ pair.first->second.push_back(std::move(entry));
+ TracedNode* result = &(pair.first->second.back().node);
+#else // !V8_USE_ADDRESS_SANITIZER
+ auto pair = on_stack_nodes_.insert(
+ {base::Stack::GetRealStackAddressForSlot(slot), std::move(entry)});
if (!pair.second) {
// Insertion failed because there already was an entry present for that
// stack address. This can happen because cleanup is conservative in which
@@ -838,6 +855,7 @@ GlobalHandles::TracedNode* GlobalHandles::OnStackTracedNodeSpace::Acquire(
pair.first->second.node.Free(nullptr);
}
TracedNode* result = &(pair.first->second.node);
+#endif // !V8_USE_ADDRESS_SANITIZER
result->Acquire(value);
result->set_is_on_stack(true);
return result;
@@ -845,7 +863,8 @@ GlobalHandles::TracedNode* GlobalHandles::OnStackTracedNodeSpace::Acquire(
void GlobalHandles::OnStackTracedNodeSpace::CleanupBelowCurrentStackPosition() {
if (on_stack_nodes_.empty()) return;
- const auto it = on_stack_nodes_.upper_bound(GetCurrentStackPosition());
+ const auto it =
+ on_stack_nodes_.upper_bound(base::Stack::GetCurrentStackPosition());
on_stack_nodes_.erase(on_stack_nodes_.begin(), it);
}
@@ -1073,7 +1092,7 @@ void GlobalHandles::MoveTracedGlobal(Address** from, Address** to) {
}
}
DestroyTraced(*from);
- *from = nullptr;
+ SetSlotThreadSafe(from, nullptr);
} else {
// Pure heap move.
DestroyTraced(*to);
@@ -1086,7 +1105,7 @@ void GlobalHandles::MoveTracedGlobal(Address** from, Address** to) {
if (to_node->has_destructor()) {
to_node->set_parameter(to);
}
- *from = nullptr;
+ SetSlotThreadSafe(from, nullptr);
}
TracedNode::Verify(global_handles, to);
}
diff --git a/deps/v8/src/handles/global-handles.h b/deps/v8/src/handles/global-handles.h
index ccd3e4ceda..bcca8627d1 100644
--- a/deps/v8/src/handles/global-handles.h
+++ b/deps/v8/src/handles/global-handles.h
@@ -10,12 +10,12 @@
#include <utility>
#include <vector>
-#include "include/v8.h"
#include "include/v8-profiler.h"
-
-#include "src/utils/utils.h"
+#include "include/v8.h"
#include "src/handles/handles.h"
+#include "src/heap/heap.h"
#include "src/objects/objects.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -323,6 +323,52 @@ class EternalHandles final {
DISALLOW_COPY_AND_ASSIGN(EternalHandles);
};
+// A vector of global Handles which automatically manages the backing of those
+// Handles as a vector of strong-rooted addresses. Handles returned by the
+// vector are valid as long as they are present in the vector.
+template <typename T>
+class GlobalHandleVector {
+ public:
+ class Iterator {
+ public:
+ explicit Iterator(
+ std::vector<Address, StrongRootBlockAllocator>::iterator it)
+ : it_(it) {}
+ Iterator& operator++() {
+ ++it_;
+ return *this;
+ }
+ Handle<T> operator*() { return Handle<T>(&*it_); }
+ bool operator!=(Iterator& that) { return it_ != that.it_; }
+
+ private:
+ std::vector<Address, StrongRootBlockAllocator>::iterator it_;
+ };
+
+ explicit GlobalHandleVector(Heap* heap)
+ : locations_(StrongRootBlockAllocator(heap)) {}
+
+ Handle<T> operator[](size_t i) { return Handle<T>(&locations_[i]); }
+
+ size_t size() const { return locations_.size(); }
+ bool empty() const { return locations_.empty(); }
+
+ void Push(T val) { locations_.push_back(val.ptr()); }
+ // Handles into the GlobalHandleVector become invalid when they are removed,
+ // so "pop" returns a raw object rather than a handle.
+ T Pop() {
+ T obj = T::cast(Object(locations_.back()));
+ locations_.pop_back();
+ return obj;
+ }
+
+ Iterator begin() { return Iterator(locations_.begin()); }
+ Iterator end() { return Iterator(locations_.end()); }
+
+ private:
+ std::vector<Address, StrongRootBlockAllocator> locations_;
+};
+
} // namespace internal
} // namespace v8
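The new GlobalHandleVector hands out handles that point straight into its backing storage, so a handle is only meant to be used while its slot is still in the vector, and Pop() returns a raw value rather than a handle. A simplified, illustrative sketch of that shape with raw pointers standing in for Handles (not the V8 types):

#include <cstdint>
#include <vector>

using Address = uintptr_t;

// Elements are exposed through pointers into the vector, mirroring how
// GlobalHandleVector exposes Handles into its address storage; Pop() hands
// back the value itself because the slot (and any pointer to it) disappears.
class AddressVector {
 public:
  void Push(Address value) { locations_.push_back(value); }
  Address* at(size_t i) { return &locations_[i]; }
  Address Pop() {
    Address value = locations_.back();
    locations_.pop_back();
    return value;
  }
  size_t size() const { return locations_.size(); }
  bool empty() const { return locations_.empty(); }

 private:
  std::vector<Address> locations_;
};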
diff --git a/deps/v8/src/handles/handles-inl.h b/deps/v8/src/handles/handles-inl.h
index b263187ecd..0215d13ddb 100644
--- a/deps/v8/src/handles/handles-inl.h
+++ b/deps/v8/src/handles/handles-inl.h
@@ -9,6 +9,7 @@
#include "src/execution/local-isolate.h"
#include "src/handles/handles.h"
#include "src/handles/local-handles-inl.h"
+#include "src/objects/objects.h"
#include "src/sanitizer/msan.h"
namespace v8 {
@@ -25,8 +26,15 @@ HandleBase::HandleBase(Address object, LocalIsolate* isolate)
HandleBase::HandleBase(Address object, LocalHeap* local_heap)
: location_(LocalHandleScope::GetHandle(local_heap, object)) {}
-// Allocate a new handle for the object, do not canonicalize.
+bool HandleBase::is_identical_to(const HandleBase that) const {
+ SLOW_DCHECK((this->location_ == nullptr || this->IsDereferenceAllowed()) &&
+ (that.location_ == nullptr || that.IsDereferenceAllowed()));
+ if (this->location_ == that.location_) return true;
+ if (this->location_ == nullptr || that.location_ == nullptr) return false;
+ return Object(*this->location_) == Object(*that.location_);
+}
+// Allocate a new handle for the object, do not canonicalize.
template <typename T>
Handle<T> Handle<T>::New(T object, Isolate* isolate) {
return Handle(HandleScope::CreateHandle(isolate, object.ptr()));
diff --git a/deps/v8/src/handles/handles.cc b/deps/v8/src/handles/handles.cc
index 85072a375a..aee0e27f20 100644
--- a/deps/v8/src/handles/handles.cc
+++ b/deps/v8/src/handles/handles.cc
@@ -41,6 +41,7 @@ bool HandleBase::IsDereferenceAllowed() const {
RootsTable::IsImmortalImmovable(root_index)) {
return true;
}
+ if (isolate->IsBuiltinsTableHandleLocation(location_)) return true;
LocalHeap* local_heap = LocalHeap::Current();
if (FLAG_local_heaps && local_heap) {
@@ -175,12 +176,12 @@ Address* CanonicalHandleScope::Lookup(Address object) {
return isolate_->root_handle(root_index).location();
}
}
- Address** entry = identity_map_->Get(Object(object));
- if (*entry == nullptr) {
+ auto find_result = identity_map_->FindOrInsert(Object(object));
+ if (!find_result.already_exists) {
// Allocate new handle location.
- *entry = HandleScope::CreateHandle(isolate_, object);
+ *find_result.entry = HandleScope::CreateHandle(isolate_, object);
}
- return *entry;
+ return *find_result.entry;
}
std::unique_ptr<CanonicalHandlesMap>
diff --git a/deps/v8/src/handles/handles.h b/deps/v8/src/handles/handles.h
index 6f45da8483..62f06ce232 100644
--- a/deps/v8/src/handles/handles.h
+++ b/deps/v8/src/handles/handles.h
@@ -44,14 +44,7 @@ class HandleBase {
V8_INLINE explicit HandleBase(Address object, LocalHeap* local_heap);
// Check if this handle refers to the exact same object as the other handle.
- V8_INLINE bool is_identical_to(const HandleBase that) const {
- SLOW_DCHECK((this->location_ == nullptr || this->IsDereferenceAllowed()) &&
- (that.location_ == nullptr || that.IsDereferenceAllowed()));
- if (this->location_ == that.location_) return true;
- if (this->location_ == nullptr || that.location_ == nullptr) return false;
- return *this->location_ == *that.location_;
- }
-
+ V8_INLINE bool is_identical_to(const HandleBase that) const;
V8_INLINE bool is_null() const { return location_ == nullptr; }
// Returns the raw address where this handle is stored. This should only be
diff --git a/deps/v8/src/handles/maybe-handles-inl.h b/deps/v8/src/handles/maybe-handles-inl.h
index d4989d9456..62c00dde34 100644
--- a/deps/v8/src/handles/maybe-handles-inl.h
+++ b/deps/v8/src/handles/maybe-handles-inl.h
@@ -17,6 +17,10 @@ template <typename T>
MaybeHandle<T>::MaybeHandle(T object, Isolate* isolate)
: MaybeHandle(handle(object, isolate)) {}
+template <typename T>
+MaybeHandle<T>::MaybeHandle(T object, LocalHeap* local_heap)
+ : MaybeHandle(handle(object, local_heap)) {}
+
MaybeObjectHandle::MaybeObjectHandle(MaybeObject object, Isolate* isolate) {
HeapObject heap_object;
DCHECK(!object->IsCleared());
@@ -29,6 +33,19 @@ MaybeObjectHandle::MaybeObjectHandle(MaybeObject object, Isolate* isolate) {
}
}
+MaybeObjectHandle::MaybeObjectHandle(MaybeObject object,
+ LocalHeap* local_heap) {
+ HeapObject heap_object;
+ DCHECK(!object->IsCleared());
+ if (object->GetHeapObjectIfWeak(&heap_object)) {
+ handle_ = handle(heap_object, local_heap);
+ reference_type_ = HeapObjectReferenceType::WEAK;
+ } else {
+ handle_ = handle(object->cast<Object>(), local_heap);
+ reference_type_ = HeapObjectReferenceType::STRONG;
+ }
+}
+
MaybeObjectHandle::MaybeObjectHandle(Handle<Object> object)
: reference_type_(HeapObjectReferenceType::STRONG), handle_(object) {}
@@ -36,6 +53,10 @@ MaybeObjectHandle::MaybeObjectHandle(Object object, Isolate* isolate)
: reference_type_(HeapObjectReferenceType::STRONG),
handle_(object, isolate) {}
+MaybeObjectHandle::MaybeObjectHandle(Object object, LocalHeap* local_heap)
+ : reference_type_(HeapObjectReferenceType::STRONG),
+ handle_(object, local_heap) {}
+
MaybeObjectHandle::MaybeObjectHandle(Object object,
HeapObjectReferenceType reference_type,
Isolate* isolate)
@@ -53,6 +74,15 @@ MaybeObjectHandle MaybeObjectHandle::Weak(Object object, Isolate* isolate) {
return MaybeObjectHandle(object, HeapObjectReferenceType::WEAK, isolate);
}
+bool MaybeObjectHandle::is_identical_to(const MaybeObjectHandle& other) const {
+ Handle<Object> this_handle;
+ Handle<Object> other_handle;
+ return reference_type_ == other.reference_type_ &&
+ handle_.ToHandle(&this_handle) ==
+ other.handle_.ToHandle(&other_handle) &&
+ this_handle.is_identical_to(other_handle);
+}
+
MaybeObject MaybeObjectHandle::operator*() const {
if (reference_type_ == HeapObjectReferenceType::WEAK) {
return HeapObjectReference::Weak(*handle_.ToHandleChecked());
@@ -77,6 +107,10 @@ inline MaybeObjectHandle handle(MaybeObject object, Isolate* isolate) {
return MaybeObjectHandle(object, isolate);
}
+inline MaybeObjectHandle handle(MaybeObject object, LocalHeap* local_heap) {
+ return MaybeObjectHandle(object, local_heap);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/handles/maybe-handles.h b/deps/v8/src/handles/maybe-handles.h
index d804374088..15397ef0df 100644
--- a/deps/v8/src/handles/maybe-handles.h
+++ b/deps/v8/src/handles/maybe-handles.h
@@ -45,6 +45,7 @@ class MaybeHandle final {
: location_(maybe_handle.location_) {}
V8_INLINE MaybeHandle(T object, Isolate* isolate);
+ V8_INLINE MaybeHandle(T object, LocalHeap* local_heap);
V8_INLINE void Assert() const { DCHECK_NOT_NULL(location_); }
V8_INLINE void Check() const { CHECK_NOT_NULL(location_); }
@@ -91,6 +92,8 @@ class MaybeObjectHandle {
: reference_type_(HeapObjectReferenceType::STRONG) {}
inline MaybeObjectHandle(MaybeObject object, Isolate* isolate);
inline MaybeObjectHandle(Object object, Isolate* isolate);
+ inline MaybeObjectHandle(MaybeObject object, LocalHeap* local_heap);
+ inline MaybeObjectHandle(Object object, LocalHeap* local_heap);
inline explicit MaybeObjectHandle(Handle<Object> object);
static inline MaybeObjectHandle Weak(Object object, Isolate* isolate);
@@ -100,15 +103,7 @@ class MaybeObjectHandle {
inline MaybeObject operator->() const;
inline Handle<Object> object() const;
- bool is_identical_to(const MaybeObjectHandle& other) const {
- Handle<Object> this_handle;
- Handle<Object> other_handle;
- return reference_type_ == other.reference_type_ &&
- handle_.ToHandle(&this_handle) ==
- other.handle_.ToHandle(&other_handle) &&
- this_handle.is_identical_to(other_handle);
- }
-
+ inline bool is_identical_to(const MaybeObjectHandle& other) const;
bool is_null() const { return handle_.is_null(); }
private:
diff --git a/deps/v8/src/heap/DIR_METADATA b/deps/v8/src/heap/DIR_METADATA
new file mode 100644
index 0000000000..ff55846b31
--- /dev/null
+++ b/deps/v8/src/heap/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>GC"
+} \ No newline at end of file
diff --git a/deps/v8/src/heap/OWNERS b/deps/v8/src/heap/OWNERS
index 51a6b41416..95beec5ca2 100644
--- a/deps/v8/src/heap/OWNERS
+++ b/deps/v8/src/heap/OWNERS
@@ -7,5 +7,3 @@ ulan@chromium.org
per-file *factory*=leszeks@chromium.org
per-file read-only-*=delphick@chromium.org
-
-# COMPONENT: Blink>JavaScript>GC
diff --git a/deps/v8/src/heap/array-buffer-sweeper.cc b/deps/v8/src/heap/array-buffer-sweeper.cc
index ab75e65166..5bc8fcb720 100644
--- a/deps/v8/src/heap/array-buffer-sweeper.cc
+++ b/deps/v8/src/heap/array-buffer-sweeper.cc
@@ -3,6 +3,9 @@
// found in the LICENSE file.
#include "src/heap/array-buffer-sweeper.h"
+
+#include <atomic>
+
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/objects/js-array-buffer.h"
@@ -69,27 +72,25 @@ void ArrayBufferSweeper::EnsureFinished() {
if (!sweeping_in_progress_) return;
TryAbortResult abort_result =
- heap_->isolate()->cancelable_task_manager()->TryAbort(job_.id);
+ heap_->isolate()->cancelable_task_manager()->TryAbort(job_->id_);
switch (abort_result) {
case TryAbortResult::kTaskAborted: {
- Sweep();
+ job_->Sweep();
Merge();
break;
}
case TryAbortResult::kTaskRemoved: {
- CHECK_NE(job_.state, SweepingState::Uninitialized);
- if (job_.state == SweepingState::Prepared) Sweep();
- Merge();
+ if (job_->state_ == SweepingState::kInProgress) job_->Sweep();
+ if (job_->state_ == SweepingState::kDone) Merge();
break;
}
case TryAbortResult::kTaskRunning: {
base::MutexGuard guard(&sweeping_mutex_);
- CHECK_NE(job_.state, SweepingState::Uninitialized);
// Wait until task is finished with its work.
- while (job_.state != SweepingState::Swept) {
+ while (job_->state_ != SweepingState::kDone) {
job_finished_.Wait(&sweeping_mutex_);
}
Merge();
@@ -104,27 +105,34 @@ void ArrayBufferSweeper::EnsureFinished() {
sweeping_in_progress_ = false;
}
-void ArrayBufferSweeper::DecrementExternalMemoryCounters() {
- size_t bytes = freed_bytes_.load(std::memory_order_relaxed);
- if (bytes == 0) return;
-
- while (!freed_bytes_.compare_exchange_weak(bytes, 0)) {
- // empty body
+void ArrayBufferSweeper::AdjustCountersAndMergeIfPossible() {
+ if (sweeping_in_progress_) {
+ DCHECK(job_.has_value());
+ if (job_->state_ == SweepingState::kDone) {
+ Merge();
+ sweeping_in_progress_ = false;
+ } else {
+ DecrementExternalMemoryCounters();
+ }
}
+}
- if (bytes == 0) return;
+void ArrayBufferSweeper::DecrementExternalMemoryCounters() {
+ size_t freed_bytes = freed_bytes_.exchange(0, std::memory_order_relaxed);
- heap_->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kArrayBuffer, bytes);
- heap_->update_external_memory(-static_cast<int64_t>(bytes));
+ if (freed_bytes > 0) {
+ heap_->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer, freed_bytes);
+ heap_->update_external_memory(-static_cast<int64_t>(freed_bytes));
+ }
}
void ArrayBufferSweeper::RequestSweepYoung() {
- RequestSweep(SweepingScope::Young);
+ RequestSweep(SweepingScope::kYoung);
}
void ArrayBufferSweeper::RequestSweepFull() {
- RequestSweep(SweepingScope::Full);
+ RequestSweep(SweepingScope::kFull);
}
size_t ArrayBufferSweeper::YoungBytes() { return young_bytes_; }
@@ -134,7 +142,7 @@ size_t ArrayBufferSweeper::OldBytes() { return old_bytes_; }
void ArrayBufferSweeper::RequestSweep(SweepingScope scope) {
DCHECK(!sweeping_in_progress_);
- if (young_.IsEmpty() && (old_.IsEmpty() || scope == SweepingScope::Young))
+ if (young_.IsEmpty() && (old_.IsEmpty() || scope == SweepingScope::kYoung))
return;
if (!heap_->IsTearingDown() && !heap_->ShouldReduceMemory() &&
@@ -146,42 +154,45 @@ void ArrayBufferSweeper::RequestSweep(SweepingScope scope) {
heap_->tracer(),
GCTracer::BackgroundScope::BACKGROUND_ARRAY_BUFFER_SWEEP);
base::MutexGuard guard(&sweeping_mutex_);
- Sweep();
+ job_->Sweep();
job_finished_.NotifyAll();
});
- job_.id = task->id();
+ job_->id_ = task->id();
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
sweeping_in_progress_ = true;
} else {
Prepare(scope);
- Sweep();
+ job_->Sweep();
Merge();
DecrementExternalMemoryCounters();
}
}
void ArrayBufferSweeper::Prepare(SweepingScope scope) {
- CHECK_EQ(job_.state, SweepingState::Uninitialized);
+ DCHECK(!job_.has_value());
- if (scope == SweepingScope::Young) {
- job_ =
- SweepingJob::Prepare(young_, ArrayBufferList(), SweepingScope::Young);
+ if (scope == SweepingScope::kYoung) {
+ job_.emplace(this, young_, ArrayBufferList(), SweepingScope::kYoung);
young_.Reset();
+ young_bytes_ = 0;
} else {
- CHECK_EQ(scope, SweepingScope::Full);
- job_ = SweepingJob::Prepare(young_, old_, SweepingScope::Full);
+ CHECK_EQ(scope, SweepingScope::kFull);
+ job_.emplace(this, young_, old_, SweepingScope::kFull);
young_.Reset();
old_.Reset();
+ young_bytes_ = old_bytes_ = 0;
}
}
void ArrayBufferSweeper::Merge() {
- CHECK_EQ(job_.state, SweepingState::Swept);
- young_.Append(&job_.young);
- old_.Append(&job_.old);
+ DCHECK(job_.has_value());
+ CHECK_EQ(job_->state_, SweepingState::kDone);
+ young_.Append(&job_->young_);
+ old_.Append(&job_->old_);
young_bytes_ = young_.Bytes();
old_bytes_ = old_.Bytes();
- job_.state = SweepingState::Uninitialized;
+
+ job_.reset();
}
void ArrayBufferSweeper::ReleaseAll() {
@@ -215,6 +226,7 @@ void ArrayBufferSweeper::Append(JSArrayBuffer object,
old_bytes_ += bytes;
}
+ AdjustCountersAndMergeIfPossible();
DecrementExternalMemoryCounters();
IncrementExternalMemoryCounters(bytes);
}
@@ -226,42 +238,34 @@ void ArrayBufferSweeper::IncrementExternalMemoryCounters(size_t bytes) {
->AdjustAmountOfExternalAllocatedMemory(static_cast<int64_t>(bytes));
}
-ArrayBufferSweeper::SweepingJob::SweepingJob()
- : state(SweepingState::Uninitialized) {}
-
-ArrayBufferSweeper::SweepingJob ArrayBufferSweeper::SweepingJob::Prepare(
- ArrayBufferList young, ArrayBufferList old, SweepingScope scope) {
- SweepingJob job;
- job.young = young;
- job.old = old;
- job.scope = scope;
- job.id = 0;
- job.state = SweepingState::Prepared;
- return job;
+void ArrayBufferSweeper::IncrementFreedBytes(size_t bytes) {
+ if (bytes == 0) return;
+ freed_bytes_.fetch_add(bytes, std::memory_order_relaxed);
}
-void ArrayBufferSweeper::Sweep() {
- CHECK_EQ(job_.state, SweepingState::Prepared);
+void ArrayBufferSweeper::SweepingJob::Sweep() {
+ CHECK_EQ(state_, SweepingState::kInProgress);
- if (job_.scope == SweepingScope::Young) {
+ if (scope_ == SweepingScope::kYoung) {
SweepYoung();
} else {
- CHECK_EQ(job_.scope, SweepingScope::Full);
+ CHECK_EQ(scope_, SweepingScope::kFull);
SweepFull();
}
- job_.state = SweepingState::Swept;
+ state_ = SweepingState::kDone;
}
-void ArrayBufferSweeper::SweepFull() {
- CHECK_EQ(job_.scope, SweepingScope::Full);
- ArrayBufferList promoted = SweepListFull(&job_.young);
- ArrayBufferList survived = SweepListFull(&job_.old);
+void ArrayBufferSweeper::SweepingJob::SweepFull() {
+ CHECK_EQ(scope_, SweepingScope::kFull);
+ ArrayBufferList promoted = SweepListFull(&young_);
+ ArrayBufferList survived = SweepListFull(&old_);
- job_.old = promoted;
- job_.old.Append(&survived);
+ old_ = promoted;
+ old_.Append(&survived);
}
-ArrayBufferList ArrayBufferSweeper::SweepListFull(ArrayBufferList* list) {
+ArrayBufferList ArrayBufferSweeper::SweepingJob::SweepListFull(
+ ArrayBufferList* list) {
ArrayBufferExtension* current = list->head_;
ArrayBufferList survivor_list;
@@ -271,7 +275,7 @@ ArrayBufferList ArrayBufferSweeper::SweepListFull(ArrayBufferList* list) {
if (!current->IsMarked()) {
size_t bytes = current->accounting_length();
delete current;
- IncrementFreedBytes(bytes);
+ sweeper_->IncrementFreedBytes(bytes);
} else {
current->Unmark();
survivor_list.Append(current);
@@ -284,9 +288,9 @@ ArrayBufferList ArrayBufferSweeper::SweepListFull(ArrayBufferList* list) {
return survivor_list;
}
-void ArrayBufferSweeper::SweepYoung() {
- CHECK_EQ(job_.scope, SweepingScope::Young);
- ArrayBufferExtension* current = job_.young.head_;
+void ArrayBufferSweeper::SweepingJob::SweepYoung() {
+ CHECK_EQ(scope_, SweepingScope::kYoung);
+ ArrayBufferExtension* current = young_.head_;
ArrayBufferList new_young;
ArrayBufferList new_old;
@@ -297,7 +301,7 @@ void ArrayBufferSweeper::SweepYoung() {
if (!current->IsYoungMarked()) {
size_t bytes = current->accounting_length();
delete current;
- IncrementFreedBytes(bytes);
+ sweeper_->IncrementFreedBytes(bytes);
} else if (current->IsYoungPromoted()) {
current->YoungUnmark();
new_old.Append(current);
@@ -309,13 +313,8 @@ void ArrayBufferSweeper::SweepYoung() {
current = next;
}
- job_.old = new_old;
- job_.young = new_young;
-}
-
-void ArrayBufferSweeper::IncrementFreedBytes(size_t bytes) {
- if (bytes == 0) return;
- freed_bytes_.fetch_add(bytes);
+ old_ = new_old;
+ young_ = new_young;
}
} // namespace internal
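DecrementExternalMemoryCounters now drains the freed-bytes counter with a single atomic exchange instead of a compare-exchange retry loop. A minimal sketch of that producer/consumer counter pattern (plain std::atomic, outside the sweeper):

#include <atomic>
#include <cstddef>

std::atomic<size_t> freed_bytes{0};

// Producer side: background sweeping accumulates freed bytes.
void IncrementFreedBytes(size_t bytes) {
  if (bytes == 0) return;
  freed_bytes.fetch_add(bytes, std::memory_order_relaxed);
}

// Consumer side: take everything accumulated so far and reset the counter to
// zero in one atomic step -- no compare_exchange loop required.
size_t DrainFreedBytes() {
  return freed_bytes.exchange(0, std::memory_order_relaxed);
}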
diff --git a/deps/v8/src/heap/array-buffer-sweeper.h b/deps/v8/src/heap/array-buffer-sweeper.h
index 5cedb2b8f8..963682d82f 100644
--- a/deps/v8/src/heap/array-buffer-sweeper.h
+++ b/deps/v8/src/heap/array-buffer-sweeper.h
@@ -68,37 +68,46 @@ class ArrayBufferSweeper {
size_t OldBytes();
private:
- enum class SweepingScope { Young, Full };
+ enum class SweepingScope { kYoung, kFull };
- enum class SweepingState { Uninitialized, Prepared, Swept };
+ enum class SweepingState { kInProgress, kDone };
struct SweepingJob {
- CancelableTaskManager::Id id;
- SweepingState state;
- ArrayBufferList young;
- ArrayBufferList old;
- SweepingScope scope;
-
- SweepingJob();
-
- static SweepingJob Prepare(ArrayBufferList young, ArrayBufferList old,
- SweepingScope scope);
- } job_;
+ ArrayBufferSweeper* sweeper_;
+ CancelableTaskManager::Id id_;
+ std::atomic<SweepingState> state_;
+ ArrayBufferList young_;
+ ArrayBufferList old_;
+ SweepingScope scope_;
+
+ SweepingJob(ArrayBufferSweeper* sweeper, ArrayBufferList young,
+ ArrayBufferList old, SweepingScope scope)
+ : sweeper_(sweeper),
+ id_(0),
+ state_(SweepingState::kInProgress),
+ young_(young),
+ old_(old),
+ scope_(scope) {}
+
+ void Sweep();
+ void SweepYoung();
+ void SweepFull();
+ ArrayBufferList SweepListFull(ArrayBufferList* list);
+ };
+
+ base::Optional<SweepingJob> job_;
void Merge();
+ void AdjustCountersAndMergeIfPossible();
void DecrementExternalMemoryCounters();
void IncrementExternalMemoryCounters(size_t bytes);
void IncrementFreedBytes(size_t bytes);
+ void IncrementFreedYoungBytes(size_t bytes);
void RequestSweep(SweepingScope sweeping_task);
void Prepare(SweepingScope sweeping_task);
- void Sweep();
- void SweepYoung();
- void SweepFull();
- ArrayBufferList SweepListFull(ArrayBufferList* list);
-
ArrayBufferList SweepYoungGen();
void SweepOldGen(ArrayBufferExtension* extension);
diff --git a/deps/v8/src/heap/base/stack.cc b/deps/v8/src/heap/base/stack.cc
index cd28444474..939487ca77 100644
--- a/deps/v8/src/heap/base/stack.cc
+++ b/deps/v8/src/heap/base/stack.cc
@@ -20,9 +20,19 @@ extern "C" void PushAllRegistersAndIterateStack(const Stack*, StackVisitor*,
Stack::Stack(const void* stack_start) : stack_start_(stack_start) {}
bool Stack::IsOnStack(void* slot) const {
- void* raw_slot = v8::base::Stack::GetStackSlot(slot);
- return v8::base::Stack::GetCurrentStackPosition() <= raw_slot &&
- raw_slot <= stack_start_;
+#ifdef V8_USE_ADDRESS_SANITIZER
+ // If the slot is part of a fake frame, then it is definitely on the stack.
+ void* real_frame = __asan_addr_is_in_fake_stack(
+ __asan_get_current_fake_stack(), reinterpret_cast<void*>(slot), nullptr,
+ nullptr);
+ if (real_frame) {
+ return true;
+ }
+ // Fall through as there is still a regular stack present even when running
+ // with ASAN fake stacks.
+#endif // V8_USE_ADDRESS_SANITIZER
+ return v8::base::Stack::GetCurrentStackPosition() <= slot &&
+ slot <= stack_start_;
}
namespace {
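
The ASAN branch above matters because AddressSanitizer can relocate stack locals into per-frame "fake stacks" that live outside the real stack range, so a pure range check would wrongly classify such slots as off-stack. Below is a minimal standalone sketch of the same check; it is an illustration rather than the V8 class: IsOnStackSketch is a hypothetical name, __SANITIZE_ADDRESS__ stands in for V8's V8_USE_ADDRESS_SANITIZER define, and taking the address of a local approximates GetCurrentStackPosition().

#include <cstdint>
#if defined(__SANITIZE_ADDRESS__)
#include <sanitizer/asan_interface.h>
#endif

// Sketch only: |stack_start| is the highest address of the current thread's
// stack, |slot| is the address being tested.
bool IsOnStackSketch(const void* stack_start, void* slot) {
#if defined(__SANITIZE_ADDRESS__)
  // A slot inside an ASAN fake frame is on the stack even though its address
  // lies outside the real stack range.
  if (__asan_addr_is_in_fake_stack(__asan_get_current_fake_stack(), slot,
                                   nullptr, nullptr)) {
    return true;
  }
#endif
  // Regular range check: the stack grows downwards, so a slot is on the stack
  // iff it lies between the current stack position and |stack_start|.
  char local = 0;
  const uintptr_t current = reinterpret_cast<uintptr_t>(&local);
  const uintptr_t value = reinterpret_cast<uintptr_t>(slot);
  return current <= value && value <= reinterpret_cast<uintptr_t>(stack_start);
}
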
diff --git a/deps/v8/src/heap/base/worklist.h b/deps/v8/src/heap/base/worklist.h
index be2ecf158b..e2d33616ad 100644
--- a/deps/v8/src/heap/base/worklist.h
+++ b/deps/v8/src/heap/base/worklist.h
@@ -285,6 +285,9 @@ class Worklist<EntryType, SegmentSize>::Local {
void Publish();
void Merge(Worklist<EntryType, SegmentSize>::Local* other);
+ bool IsEmpty() const;
+ void Clear();
+
size_t PushSegmentSize() const { return push_segment_->Size(); }
private:
@@ -445,6 +448,17 @@ bool Worklist<EntryType, SegmentSize>::Local::StealPopSegment() {
return false;
}
+template <typename EntryType, uint16_t SegmentSize>
+bool Worklist<EntryType, SegmentSize>::Local::IsEmpty() const {
+ return push_segment_->IsEmpty() && pop_segment_->IsEmpty();
+}
+
+template <typename EntryType, uint16_t SegmentSize>
+void Worklist<EntryType, SegmentSize>::Local::Clear() {
+ push_segment_->Clear();
+ pop_segment_->Clear();
+}
+
} // namespace base
} // namespace heap
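
The new Local::IsEmpty() and Local::Clear() helpers only concern the thread-local view, i.e. the private push and pop segments, not the shared worklist. The sketch below is a simplified model under that assumption (std::vector segments instead of V8's fixed-size segments, and no shared global pool) just to show what the two new operations mean for a local view.

#include <vector>

// Simplified model (not the V8 class): a thread-local worklist view that owns
// a push segment and a pop segment.
template <typename EntryType>
class LocalWorklistSketch {
 public:
  void Push(EntryType entry) { push_segment_.push_back(entry); }

  bool Pop(EntryType* entry) {
    if (pop_segment_.empty()) {
      if (push_segment_.empty()) return false;
      pop_segment_.swap(push_segment_);  // Refill the pop side locally.
    }
    *entry = pop_segment_.back();
    pop_segment_.pop_back();
    return true;
  }

  // Mirrors Worklist<...>::Local::IsEmpty(): both segments must be empty.
  bool IsEmpty() const { return push_segment_.empty() && pop_segment_.empty(); }

  // Mirrors Worklist<...>::Local::Clear(): discard all local entries without
  // publishing them.
  void Clear() {
    push_segment_.clear();
    pop_segment_.clear();
  }

 private:
  std::vector<EntryType> push_segment_;
  std::vector<EntryType> pop_segment_;
};
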
diff --git a/deps/v8/src/heap/basic-memory-chunk.h b/deps/v8/src/heap/basic-memory-chunk.h
index d6c3e5f29a..e102349fa9 100644
--- a/deps/v8/src/heap/basic-memory-chunk.h
+++ b/deps/v8/src/heap/basic-memory-chunk.h
@@ -283,13 +283,11 @@ class BasicMemoryChunk {
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static BasicMemoryChunk* FromAddress(Address a) {
- DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(a));
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
static BasicMemoryChunk* FromHeapObject(HeapObject o) {
- DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(o.ptr()));
}
diff --git a/deps/v8/src/heap/code-object-registry.cc b/deps/v8/src/heap/code-object-registry.cc
index ebaa29fbae..f8ec349409 100644
--- a/deps/v8/src/heap/code-object-registry.cc
+++ b/deps/v8/src/heap/code-object-registry.cc
@@ -12,63 +12,57 @@ namespace v8 {
namespace internal {
void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
- auto result = code_object_registry_newly_allocated_.insert(code);
- USE(result);
- DCHECK(result.second);
+ if (is_sorted_) {
+ is_sorted_ =
+ (code_object_registry_.empty() || code_object_registry_.back() < code);
+ }
+ code_object_registry_.push_back(code);
}
void CodeObjectRegistry::RegisterAlreadyExistingCodeObject(Address code) {
- code_object_registry_already_existing_.push_back(code);
+ DCHECK(is_sorted_);
+ DCHECK(code_object_registry_.empty() || code_object_registry_.back() < code);
+ code_object_registry_.push_back(code);
}
void CodeObjectRegistry::Clear() {
- code_object_registry_already_existing_.clear();
- code_object_registry_newly_allocated_.clear();
+ code_object_registry_.clear();
+ is_sorted_ = true;
}
void CodeObjectRegistry::Finalize() {
- code_object_registry_already_existing_.shrink_to_fit();
+ DCHECK(is_sorted_);
+ code_object_registry_.shrink_to_fit();
}
bool CodeObjectRegistry::Contains(Address object) const {
- return (code_object_registry_newly_allocated_.find(object) !=
- code_object_registry_newly_allocated_.end()) ||
- (std::binary_search(code_object_registry_already_existing_.begin(),
- code_object_registry_already_existing_.end(),
- object));
+ if (!is_sorted_) {
+ std::sort(code_object_registry_.begin(), code_object_registry_.end());
+ is_sorted_ = true;
+ }
+ return (std::binary_search(code_object_registry_.begin(),
+ code_object_registry_.end(), object));
}
Address CodeObjectRegistry::GetCodeObjectStartFromInnerAddress(
Address address) const {
- // Let's first find the object which comes right before address in the vector
- // of already existing code objects.
- Address already_existing_set_ = 0;
- Address newly_allocated_set_ = 0;
- if (!code_object_registry_already_existing_.empty()) {
- auto it =
- std::upper_bound(code_object_registry_already_existing_.begin(),
- code_object_registry_already_existing_.end(), address);
- if (it != code_object_registry_already_existing_.begin()) {
- already_existing_set_ = *(--it);
- }
- }
-
- // Next, let's find the object which comes right before address in the set
- // of newly allocated code objects.
- if (!code_object_registry_newly_allocated_.empty()) {
- auto it = code_object_registry_newly_allocated_.upper_bound(address);
- if (it != code_object_registry_newly_allocated_.begin()) {
- newly_allocated_set_ = *(--it);
- }
+ if (!is_sorted_) {
+ std::sort(code_object_registry_.begin(), code_object_registry_.end());
+ is_sorted_ = true;
}
- // The code objects which contains address has to be in one of the two
- // data structures.
- DCHECK(already_existing_set_ != 0 || newly_allocated_set_ != 0);
+ // The code registry can't be empty, else the code object can't exist.
+ DCHECK(!code_object_registry_.empty());
- // The address which is closest to the given address is the code object.
- return already_existing_set_ > newly_allocated_set_ ? already_existing_set_
- : newly_allocated_set_;
+ // std::upper_bound returns the first code object strictly greater than
+ // address, so the code object containing the address has to be the previous
+ // one.
+ auto it = std::upper_bound(code_object_registry_.begin(),
+ code_object_registry_.end(), address);
+ // The address has to be contained in a code object, so necessarily the
+ // address can't be smaller than the first code object.
+ DCHECK_NE(it, code_object_registry_.begin());
+ return *(--it);
}
} // namespace internal
diff --git a/deps/v8/src/heap/code-object-registry.h b/deps/v8/src/heap/code-object-registry.h
index beab176625..ae5199903b 100644
--- a/deps/v8/src/heap/code-object-registry.h
+++ b/deps/v8/src/heap/code-object-registry.h
@@ -28,8 +28,10 @@ class V8_EXPORT_PRIVATE CodeObjectRegistry {
Address GetCodeObjectStartFromInnerAddress(Address address) const;
private:
- std::vector<Address> code_object_registry_already_existing_;
- std::set<Address> code_object_registry_newly_allocated_;
+ // A vector of addresses that is not necessarily sorted. It is declared
+ // 'mutable' so that it can be sorted lazily during Contains and
+ // GetCodeObjectStartFromInnerAddress.
+ mutable std::vector<Address> code_object_registry_;
+ mutable bool is_sorted_ = true;
};
} // namespace internal
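
The rewrite above collapses the former vector-plus-set pair into a single, lazily sorted vector: registrations only record whether the vector is still sorted, and the const lookup paths sort on demand before using binary search. A minimal standalone sketch of that pattern (hypothetical names, plain uintptr_t instead of V8's Address) could look as follows.

#include <algorithm>
#include <cstdint>
#include <vector>

class LazySortedRegistrySketch {
 public:
  void Register(uintptr_t address) {
    // Appending in ascending order keeps the vector sorted for free.
    if (sorted_) sorted_ = entries_.empty() || entries_.back() < address;
    entries_.push_back(address);
  }

  bool Contains(uintptr_t address) const {
    EnsureSorted();
    return std::binary_search(entries_.begin(), entries_.end(), address);
  }

  // Returns the start of the registered range containing |inner|, assuming
  // |inner| is covered by some registered entry.
  uintptr_t StartFromInnerAddress(uintptr_t inner) const {
    EnsureSorted();
    // upper_bound yields the first entry strictly greater than |inner|, so the
    // containing entry is the previous one.
    auto it = std::upper_bound(entries_.begin(), entries_.end(), inner);
    return *(--it);
  }

 private:
  void EnsureSorted() const {
    if (!sorted_) {
      std::sort(entries_.begin(), entries_.end());
      sorted_ = true;
    }
  }

  mutable std::vector<uintptr_t> entries_;
  mutable bool sorted_ = true;
};
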
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index bf8b9a64ce..abca2c75f9 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -194,12 +194,13 @@ void CodeStatistics::CollectCommentStatistics(Isolate* isolate,
EnterComment(isolate, comment_txt, flat_delta);
}
-// Collects code comment statistics
+// Collects code comment statistics.
void CodeStatistics::CollectCodeCommentStatistics(HeapObject obj,
Isolate* isolate) {
// Bytecode objects do not contain RelocInfo. Only process code objects
// for code comment statistics.
if (!obj.IsCode()) {
+ DCHECK(obj.IsBytecodeArray());
return;
}
@@ -214,8 +215,8 @@ void CodeStatistics::CollectCodeCommentStatistics(HeapObject obj,
cit.Next();
}
- DCHECK(0 <= prev_pc_offset && prev_pc_offset <= code.raw_instruction_size());
- delta += static_cast<int>(code.raw_instruction_size() - prev_pc_offset);
+ DCHECK(0 <= prev_pc_offset && prev_pc_offset <= code.InstructionSize());
+ delta += static_cast<int>(code.InstructionSize() - prev_pc_offset);
EnterComment(isolate, "NoComment", delta);
}
#endif
diff --git a/deps/v8/src/heap/collection-barrier.cc b/deps/v8/src/heap/collection-barrier.cc
new file mode 100644
index 0000000000..47a9db882b
--- /dev/null
+++ b/deps/v8/src/heap/collection-barrier.cc
@@ -0,0 +1,100 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/collection-barrier.h"
+
+#include "src/base/platform/time.h"
+#include "src/heap/gc-tracer.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+
+namespace v8 {
+namespace internal {
+
+void CollectionBarrier::ResumeThreadsAwaitingCollection() {
+ base::MutexGuard guard(&mutex_);
+ ClearCollectionRequested();
+ cond_.NotifyAll();
+}
+
+void CollectionBarrier::ShutdownRequested() {
+ base::MutexGuard guard(&mutex_);
+ if (timer_.IsStarted()) timer_.Stop();
+ state_.store(RequestState::kShutdown);
+ cond_.NotifyAll();
+}
+
+class BackgroundCollectionInterruptTask : public CancelableTask {
+ public:
+ explicit BackgroundCollectionInterruptTask(Heap* heap)
+ : CancelableTask(heap->isolate()), heap_(heap) {}
+
+ ~BackgroundCollectionInterruptTask() override = default;
+
+ private:
+ // v8::internal::CancelableTask overrides.
+ void RunInternal() override { heap_->CheckCollectionRequested(); }
+
+ Heap* heap_;
+ DISALLOW_COPY_AND_ASSIGN(BackgroundCollectionInterruptTask);
+};
+
+void CollectionBarrier::AwaitCollectionBackground() {
+ bool first;
+
+ {
+ base::MutexGuard guard(&mutex_);
+ first = FirstCollectionRequest();
+ if (first) timer_.Start();
+ }
+
+ if (first) {
+ // This is the first background thread requesting collection, ask the main
+ // thread for GC.
+ ActivateStackGuardAndPostTask();
+ }
+
+ BlockUntilCollected();
+}
+
+void CollectionBarrier::StopTimeToCollectionTimer() {
+ base::MutexGuard guard(&mutex_);
+ RequestState old_state = state_.exchange(RequestState::kCollectionStarted,
+ std::memory_order_relaxed);
+ if (old_state == RequestState::kCollectionRequested) {
+ DCHECK(timer_.IsStarted());
+ base::TimeDelta delta = timer_.Elapsed();
+ TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "V8.TimeToCollection", TRACE_EVENT_SCOPE_THREAD,
+ "duration", delta.InMillisecondsF());
+ heap_->isolate()->counters()->time_to_collection()->AddTimedSample(delta);
+ timer_.Stop();
+ } else {
+ DCHECK_EQ(old_state, RequestState::kDefault);
+ DCHECK(!timer_.IsStarted());
+ }
+}
+
+void CollectionBarrier::ActivateStackGuardAndPostTask() {
+ Isolate* isolate = heap_->isolate();
+ ExecutionAccess access(isolate);
+ isolate->stack_guard()->RequestGC();
+ auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
+ reinterpret_cast<v8::Isolate*>(isolate));
+ taskrunner->PostTask(
+ std::make_unique<BackgroundCollectionInterruptTask>(heap_));
+}
+
+void CollectionBarrier::BlockUntilCollected() {
+ TRACE_BACKGROUND_GC(heap_->tracer(),
+ GCTracer::BackgroundScope::BACKGROUND_COLLECTION);
+ base::MutexGuard guard(&mutex_);
+
+ while (CollectionRequested()) {
+ cond_.Wait(&mutex_);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/collection-barrier.h b/deps/v8/src/heap/collection-barrier.h
new file mode 100644
index 0000000000..418f93ce04
--- /dev/null
+++ b/deps/v8/src/heap/collection-barrier.h
@@ -0,0 +1,93 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_COLLECTION_BARRIER_H_
+#define V8_HEAP_COLLECTION_BARRIER_H_
+
+#include <atomic>
+
+#include "src/base/optional.h"
+#include "src/base/platform/elapsed-timer.h"
+#include "src/base/platform/mutex.h"
+#include "src/logging/counters.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+// This class stops and resumes all background threads waiting for GC.
+class CollectionBarrier {
+ Heap* heap_;
+ base::Mutex mutex_;
+ base::ConditionVariable cond_;
+ base::ElapsedTimer timer_;
+
+ enum class RequestState {
+ // Default state: no collection requested and teardown wasn't initiated
+ // yet.
+ kDefault,
+
+ // Collection was already requested
+ kCollectionRequested,
+
+ // Collection was already started
+ kCollectionStarted,
+
+ // This state is reached after the isolate starts to shut down. The main
+ // thread can't perform any more GCs, so all allocations need to be
+ // allowed from here on until the background thread finishes.
+ kShutdown,
+ };
+
+ // The current state.
+ std::atomic<RequestState> state_;
+
+ // Request GC by activating stack guards and posting a task to perform the
+ // GC.
+ void ActivateStackGuardAndPostTask();
+
+ // Returns true when the state was successfully updated from kDefault to
+ // kCollectionRequested.
+ bool FirstCollectionRequest() {
+ RequestState expected = RequestState::kDefault;
+ return state_.compare_exchange_strong(expected,
+ RequestState::kCollectionRequested);
+ }
+
+ // Sets state back to kDefault - invoked at end of GC.
+ void ClearCollectionRequested() {
+ RequestState old_state =
+ state_.exchange(RequestState::kDefault, std::memory_order_relaxed);
+ USE(old_state);
+ DCHECK_EQ(old_state, RequestState::kCollectionStarted);
+ }
+
+ public:
+ explicit CollectionBarrier(Heap* heap)
+ : heap_(heap), state_(RequestState::kDefault) {}
+
+ // Checks whether any background thread requested GC.
+ bool CollectionRequested() {
+ return state_.load(std::memory_order_relaxed) ==
+ RequestState::kCollectionRequested;
+ }
+
+ void StopTimeToCollectionTimer();
+ void BlockUntilCollected();
+
+ // Resumes threads waiting for collection.
+ void ResumeThreadsAwaitingCollection();
+
+ // Sets current state to kShutdown.
+ void ShutdownRequested();
+
+ // This is the method used by background threads to request and wait for GC.
+ void AwaitCollectionBackground();
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_COLLECTION_BARRIER_H_
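
CollectionBarrier is essentially a request/block/resume handshake: a background thread flags that it needs a GC, blocks on a condition variable, and the main thread wakes all waiters once the collection is done. The sketch below models just that handshake with the standard library; names are hypothetical, and the real class additionally tracks kCollectionStarted/kShutdown, a time-to-collection timer, and posts a BackgroundCollectionInterruptTask to the main thread, as shown above.

#include <condition_variable>
#include <mutex>

class CollectionBarrierSketch {
 public:
  // Called by a background thread that needs a GC. The first requester would
  // also notify the main thread (elided here); everybody then blocks until the
  // main thread has performed the collection.
  void AwaitCollection() {
    std::unique_lock<std::mutex> lock(mutex_);
    collection_requested_ = true;
    cond_.wait(lock, [this] { return !collection_requested_; });
  }

  // Called by the main thread after it finished the requested GC.
  void ResumeThreadsAwaitingCollection() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      collection_requested_ = false;
    }
    cond_.notify_all();
  }

 private:
  std::mutex mutex_;
  std::condition_variable cond_;
  bool collection_requested_ = false;
};
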
diff --git a/deps/v8/src/heap/concurrent-allocator.cc b/deps/v8/src/heap/concurrent-allocator.cc
index 5db9159f14..82975fa339 100644
--- a/deps/v8/src/heap/concurrent-allocator.cc
+++ b/deps/v8/src/heap/concurrent-allocator.cc
@@ -9,6 +9,7 @@
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/local-heap-inl.h"
+#include "src/heap/local-heap.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
@@ -17,7 +18,8 @@ namespace internal {
void StressConcurrentAllocatorTask::RunInternal() {
Heap* heap = isolate_->heap();
- LocalHeap local_heap(heap);
+ LocalHeap local_heap(heap, ThreadKind::kBackground);
+ UnparkedScope unparked_scope(&local_heap);
const int kNumIterations = 2000;
const int kSmallObjectSize = 10 * kTaggedSize;
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index b0c3e50951..f2dfad057b 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -347,27 +347,37 @@ FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
return FixedArray::unchecked_cast(object);
}
-class ConcurrentMarking::Task : public CancelableTask {
+// The Deserializer changes the map from StrongDescriptorArray to
+// DescriptorArray
+template <>
+StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
+ return StrongDescriptorArray::unchecked_cast(DescriptorArray::cast(object));
+}
+
+class ConcurrentMarking::JobTask : public v8::JobTask {
public:
- Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
- TaskState* task_state, int task_id)
- : CancelableTask(isolate),
- concurrent_marking_(concurrent_marking),
- task_state_(task_state),
- task_id_(task_id) {}
+ JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
+ bool is_forced_gc)
+ : concurrent_marking_(concurrent_marking),
+ mark_compact_epoch_(mark_compact_epoch),
+ is_forced_gc_(is_forced_gc) {}
- ~Task() override = default;
+ ~JobTask() override = default;
- private:
- // v8::internal::CancelableTask overrides.
- void RunInternal() override {
- concurrent_marking_->Run(task_id_, task_state_);
+ // v8::JobTask overrides.
+ void Run(JobDelegate* delegate) override {
+ concurrent_marking_->Run(delegate, mark_compact_epoch_, is_forced_gc_);
}
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ return concurrent_marking_->GetMaxConcurrency(worker_count);
+ }
+
+ private:
ConcurrentMarking* concurrent_marking_;
- TaskState* task_state_;
- int task_id_;
- DISALLOW_COPY_AND_ASSIGN(Task);
+ const unsigned mark_compact_epoch_;
+ const bool is_forced_gc_;
+ DISALLOW_COPY_AND_ASSIGN(JobTask);
};
ConcurrentMarking::ConcurrentMarking(Heap* heap,
@@ -376,22 +386,29 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
: heap_(heap),
marking_worklists_(marking_worklists),
weak_objects_(weak_objects) {
-// The runtime flag should be set only if the compile time flag was set.
-#ifndef V8_CONCURRENT_MARKING
+#ifndef V8_ATOMIC_MARKING_STATE
+ // Concurrent and parallel marking require atomic marking state.
CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
#endif
+#ifndef V8_ATOMIC_OBJECT_FIELD_WRITES
+ // Concurrent marking requires atomic object field writes.
+ CHECK(!FLAG_concurrent_marking);
+#endif
}
-void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
+void ConcurrentMarking::Run(JobDelegate* delegate, unsigned mark_compact_epoch,
+ bool is_forced_gc) {
TRACE_BACKGROUND_GC(heap_->tracer(),
GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
+ uint8_t task_id = delegate->GetTaskId() + 1;
+ TaskState* task_state = &task_state_[task_id];
MarkingWorklists::Local local_marking_worklists(marking_worklists_);
ConcurrentMarkingVisitor visitor(
task_id, &local_marking_worklists, weak_objects_, heap_,
- task_state->mark_compact_epoch, Heap::GetBytecodeFlushMode(),
- heap_->local_embedder_heap_tracer()->InUse(), task_state->is_forced_gc,
+ mark_compact_epoch, Heap::GetBytecodeFlushMode(),
+ heap_->local_embedder_heap_tracer()->InUse(), is_forced_gc,
&task_state->memory_chunk_data);
NativeContextInferrer& native_context_inferrer =
task_state->native_context_inferrer;
@@ -457,7 +474,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
marked_bytes += current_marked_bytes;
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
marked_bytes);
- if (task_state->preemption_request) {
+ if (delegate->ShouldYield()) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ConcurrentMarking::Run Preempted");
break;
@@ -492,13 +509,6 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
if (ephemeron_marked) {
set_ephemeron_marked(true);
}
-
- {
- base::MutexGuard guard(&pending_lock_);
- is_pending_[task_id] = false;
- --pending_task_count_;
- pending_condition_.NotifyAll();
- }
}
if (FLAG_trace_concurrent_marking) {
heap_->isolate()->PrintWithTimestamp(
@@ -507,109 +517,71 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
}
-void ConcurrentMarking::ScheduleTasks() {
+size_t ConcurrentMarking::GetMaxConcurrency(size_t worker_count) {
+ size_t marking_items = marking_worklists_->shared()->Size();
+ for (auto& worklist : marking_worklists_->context_worklists())
+ marking_items += worklist.worklist->Size();
+ return std::min<size_t>(
+ kMaxTasks,
+ worker_count + std::max<size_t>(
+ {marking_items,
+ weak_objects_->discovered_ephemerons.GlobalPoolSize(),
+ weak_objects_->current_ephemerons.GlobalPoolSize()}));
+}
+
+void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
DCHECK(!heap_->IsTearingDown());
- base::MutexGuard guard(&pending_lock_);
- if (total_task_count_ == 0) {
- static const int num_cores =
- V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
-#if defined(V8_OS_MACOSX)
- // Mac OSX 10.11 and prior seems to have trouble when doing concurrent
- // marking on competing hyper-threads (regresses Octane/Splay). As such,
- // only use num_cores/2, leaving one of those for the main thread.
- // TODO(ulan): Use all cores on Mac 10.12+.
- total_task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
-#else // defined(V8_OS_MACOSX)
- // On other platforms use all logical cores, leaving one for the main
- // thread.
- total_task_count_ = Max(1, Min(kMaxTasks, num_cores - 2));
-#endif // defined(V8_OS_MACOSX)
- if (FLAG_gc_experiment_reduce_concurrent_marking_tasks) {
- // Use at most half of the cores in the experiment.
- total_task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
- }
- DCHECK_LE(total_task_count_, kMaxTasks);
- }
- // Task id 0 is for the main thread.
- for (int i = 1; i <= total_task_count_; i++) {
- if (!is_pending_[i]) {
- if (FLAG_trace_concurrent_marking) {
- heap_->isolate()->PrintWithTimestamp(
- "Scheduling concurrent marking task %d\n", i);
- }
- task_state_[i].preemption_request = false;
- task_state_[i].mark_compact_epoch =
- heap_->mark_compact_collector()->epoch();
- task_state_[i].is_forced_gc = heap_->is_current_gc_forced();
- is_pending_[i] = true;
- ++pending_task_count_;
- auto task =
- std::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
- cancelable_id_[i] = task->id();
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
- }
- }
- DCHECK_EQ(total_task_count_, pending_task_count_);
+ DCHECK(!job_handle_ || !job_handle_->IsValid());
+
+ job_handle_ = V8::GetCurrentPlatform()->PostJob(
+ priority,
+ std::make_unique<JobTask>(this, heap_->mark_compact_collector()->epoch(),
+ heap_->is_current_gc_forced()));
+ DCHECK(job_handle_->IsValid());
}
-void ConcurrentMarking::RescheduleTasksIfNeeded() {
+void ConcurrentMarking::RescheduleJobIfNeeded(TaskPriority priority) {
DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
if (heap_->IsTearingDown()) return;
- {
- base::MutexGuard guard(&pending_lock_);
- // The total task count is initialized in ScheduleTasks from
- // NumberOfWorkerThreads of the platform.
- if (total_task_count_ > 0 && pending_task_count_ == total_task_count_) {
- return;
- }
+
+ if (marking_worklists_->shared()->IsEmpty() &&
+ weak_objects_->current_ephemerons.IsGlobalPoolEmpty() &&
+ weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
+ return;
}
- if (!marking_worklists_->shared()->IsEmpty() ||
- !weak_objects_->current_ephemerons.IsGlobalPoolEmpty() ||
- !weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
- ScheduleTasks();
+ if (!job_handle_ || !job_handle_->IsValid()) {
+ ScheduleJob(priority);
+ } else {
+ if (priority != TaskPriority::kUserVisible)
+ job_handle_->UpdatePriority(priority);
+ job_handle_->NotifyConcurrencyIncrease();
}
}
-bool ConcurrentMarking::Stop(StopRequest stop_request) {
+void ConcurrentMarking::Join() {
DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
- base::MutexGuard guard(&pending_lock_);
-
- if (pending_task_count_ == 0) return false;
-
- if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
- CancelableTaskManager* task_manager =
- heap_->isolate()->cancelable_task_manager();
- for (int i = 1; i <= total_task_count_; i++) {
- if (is_pending_[i]) {
- if (task_manager->TryAbort(cancelable_id_[i]) ==
- TryAbortResult::kTaskAborted) {
- is_pending_[i] = false;
- --pending_task_count_;
- } else if (stop_request == StopRequest::PREEMPT_TASKS) {
- task_state_[i].preemption_request = true;
- }
- }
- }
- }
- while (pending_task_count_ > 0) {
- pending_condition_.Wait(&pending_lock_);
- }
- for (int i = 1; i <= total_task_count_; i++) {
- DCHECK(!is_pending_[i]);
- }
+ if (!job_handle_ || !job_handle_->IsValid()) return;
+ job_handle_->Join();
+}
+
+bool ConcurrentMarking::Pause() {
+ DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
+ if (!job_handle_ || !job_handle_->IsValid()) return false;
+
+ job_handle_->Cancel();
return true;
}
bool ConcurrentMarking::IsStopped() {
if (!FLAG_concurrent_marking) return true;
- base::MutexGuard guard(&pending_lock_);
- return pending_task_count_ == 0;
+ return !job_handle_ || !job_handle_->IsValid();
}
void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
- for (int i = 1; i <= total_task_count_; i++) {
+ DCHECK(!job_handle_ || !job_handle_->IsValid());
+ for (int i = 1; i <= kMaxTasks; i++) {
main_stats->Merge(task_state_[i].native_context_stats);
task_state_[i].native_context_stats.Clear();
}
@@ -617,8 +589,8 @@ void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
void ConcurrentMarking::FlushMemoryChunkData(
MajorNonAtomicMarkingState* marking_state) {
- DCHECK_EQ(pending_task_count_, 0);
- for (int i = 1; i <= total_task_count_; i++) {
+ DCHECK(!job_handle_ || !job_handle_->IsValid());
+ for (int i = 1; i <= kMaxTasks; i++) {
MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
for (auto& pair : memory_chunk_data) {
// ClearLiveness sets the live bytes to zero.
@@ -640,7 +612,8 @@ void ConcurrentMarking::FlushMemoryChunkData(
}
void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
- for (int i = 1; i <= total_task_count_; i++) {
+ DCHECK(!job_handle_ || !job_handle_->IsValid());
+ for (int i = 1; i <= kMaxTasks; i++) {
auto it = task_state_[i].memory_chunk_data.find(chunk);
if (it != task_state_[i].memory_chunk_data.end()) {
it->second.live_bytes = 0;
@@ -651,7 +624,7 @@ void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
size_t ConcurrentMarking::TotalMarkedBytes() {
size_t result = 0;
- for (int i = 1; i <= total_task_count_; i++) {
+ for (int i = 1; i <= kMaxTasks; i++) {
result +=
base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
}
@@ -661,14 +634,12 @@ size_t ConcurrentMarking::TotalMarkedBytes() {
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
: concurrent_marking_(concurrent_marking),
- resume_on_exit_(FLAG_concurrent_marking &&
- concurrent_marking_->Stop(
- ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
+ resume_on_exit_(FLAG_concurrent_marking && concurrent_marking_->Pause()) {
DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}
ConcurrentMarking::PauseScope::~PauseScope() {
- if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
+ if (resume_on_exit_) concurrent_marking_->RescheduleJobIfNeeded();
}
} // namespace internal
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 6ed671fb1b..4d2dda08c1 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -29,7 +29,7 @@ class Heap;
class Isolate;
class MajorNonAtomicMarkingState;
class MemoryChunk;
-struct WeakObjects;
+class WeakObjects;
struct MemoryChunkData {
intptr_t live_bytes;
@@ -54,17 +54,6 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
const bool resume_on_exit_;
};
- enum class StopRequest {
- // Preempt ongoing tasks ASAP (and cancel unstarted tasks).
- PREEMPT_TASKS,
- // Wait for ongoing tasks to complete (and cancels unstarted tasks).
- COMPLETE_ONGOING_TASKS,
- // Wait for all scheduled tasks to complete (only use this in tests that
- // control the full stack -- otherwise tasks cancelled by the platform can
- // make this call hang).
- COMPLETE_TASKS_FOR_TESTING,
- };
-
// TODO(gab): The only thing that prevents this being above 7 is
// Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
// task 0, reserved for the main thread).
@@ -73,16 +62,22 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
ConcurrentMarking(Heap* heap, MarkingWorklists* marking_worklists,
WeakObjects* weak_objects);
- // Schedules asynchronous tasks to perform concurrent marking. Objects in the
- // heap should not be moved while these are active (can be stopped safely via
- // Stop() or PauseScope).
- void ScheduleTasks();
-
- // Stops concurrent marking per |stop_request|'s semantics. Returns true
- // if concurrent marking was in progress, false otherwise.
- bool Stop(StopRequest stop_request);
-
- void RescheduleTasksIfNeeded();
+ // Schedules an asynchronous job to perform concurrent marking at |priority|.
+ // Objects in the heap should not be moved while the job is active (it can be
+ // stopped safely via Pause() or PauseScope).
+ void ScheduleJob(TaskPriority priority = TaskPriority::kUserVisible);
+
+ // Waits for the scheduled job to complete.
+ void Join();
+ // Preempts the ongoing job ASAP. Returns true if concurrent marking was in
+ // progress, false otherwise.
+ bool Pause();
+
+ // Schedules an asynchronous job to perform concurrent marking at |priority|
+ // if it is not already running; otherwise adjusts the number of workers
+ // running the job and the priority if different from the default
+ // kUserVisible.
+ void RescheduleJobIfNeeded(
+ TaskPriority priority = TaskPriority::kUserVisible);
// Flushes native context sizes to the given table of the main thread.
void FlushNativeContexts(NativeContextStats* main_stats);
// Flushes memory chunk data using the given marking state.
@@ -103,31 +98,24 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
private:
struct TaskState {
- // The main thread sets this flag to true when it wants the concurrent
- // marker to give up the worker thread.
- std::atomic<bool> preemption_request;
size_t marked_bytes = 0;
- unsigned mark_compact_epoch;
- bool is_forced_gc;
MemoryChunkDataMap memory_chunk_data;
NativeContextInferrer native_context_inferrer;
NativeContextStats native_context_stats;
char cache_line_padding[64];
};
- class Task;
- void Run(int task_id, TaskState* task_state);
+ class JobTask;
+ void Run(JobDelegate* delegate, unsigned mark_compact_epoch,
+ bool is_forced_gc);
+ size_t GetMaxConcurrency(size_t worker_count);
+
+ std::unique_ptr<JobHandle> job_handle_;
Heap* const heap_;
MarkingWorklists* const marking_worklists_;
WeakObjects* const weak_objects_;
TaskState task_state_[kMaxTasks + 1];
std::atomic<size_t> total_marked_bytes_{0};
std::atomic<bool> ephemeron_marked_{false};
- base::Mutex pending_lock_;
- base::ConditionVariable pending_condition_;
- int pending_task_count_ = 0;
- bool is_pending_[kMaxTasks + 1] = {};
- CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1] = {};
- int total_task_count_ = 0;
};
} // namespace internal
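
The change above replaces hand-managed CancelableTasks (pending counts, condition variables, preemption flags) with the v8::JobTask API: the platform sizes the worker pool by querying GetMaxConcurrency(), and each worker runs Run() while polling delegate->ShouldYield() so that Cancel() and priority updates take effect quickly. As a rough illustration of that pattern for an arbitrary counter-based work queue (not V8's marker), a job could be sketched like this, assuming the embedder's v8::Platform is available.

#include <atomic>
#include <cstddef>
#include <memory>

#include "include/v8-platform.h"

// Hypothetical job: cooperatively drains |remaining_items| across workers.
class DrainCounterJob final : public v8::JobTask {
 public:
  explicit DrainCounterJob(std::atomic<size_t>* remaining_items)
      : remaining_items_(remaining_items) {}

  void Run(v8::JobDelegate* delegate) override {
    while (true) {
      // Yield promptly so Cancel() and priority changes take effect.
      if (delegate->ShouldYield()) return;
      size_t current = remaining_items_->load(std::memory_order_relaxed);
      if (current == 0) return;  // Queue drained.
      // Try to claim one item; retry if another worker raced us.
      if (remaining_items_->compare_exchange_weak(current, current - 1,
                                                  std::memory_order_relaxed)) {
        // ...process the claimed item here...
      }
    }
  }

  size_t GetMaxConcurrency(size_t /*worker_count*/) const override {
    // One worker per pending item is more than enough.
    return remaining_items_->load(std::memory_order_relaxed);
  }

 private:
  std::atomic<size_t>* const remaining_items_;
};

// Usage sketch, assuming |platform| is the embedder's v8::Platform:
//   auto handle = platform->PostJob(v8::TaskPriority::kUserVisible,
//                                   std::make_unique<DrainCounterJob>(&remaining));
//   handle->Join();    // wait for completion, as ConcurrentMarking::Join() does
//   // or handle->Cancel() to preempt, as ConcurrentMarking::Pause() does.
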
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index 6c9f99272a..712be09f2c 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -12,8 +12,11 @@
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/heap/base/stack.h"
+#include "src/heap/cppgc-js/cpp-snapshot.h"
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
+#include "src/heap/cppgc-js/unified-heap-marking-verifier.h"
#include "src/heap/cppgc-js/unified-heap-marking-visitor.h"
+#include "src/heap/cppgc/concurrent-marker.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
@@ -27,6 +30,7 @@
#include "src/heap/marking-worklist.h"
#include "src/heap/sweeper.h"
#include "src/init/v8.h"
+#include "src/profiler/heap-profiler.h"
namespace v8 {
namespace internal {
@@ -63,6 +67,33 @@ class CppgcPlatformAdapter final : public cppgc::Platform {
v8::Isolate* isolate_;
};
+class UnifiedHeapConcurrentMarker
+ : public cppgc::internal::ConcurrentMarkerBase {
+ public:
+ UnifiedHeapConcurrentMarker(
+ cppgc::internal::HeapBase& heap,
+ cppgc::internal::MarkingWorklists& marking_worklists,
+ cppgc::internal::IncrementalMarkingSchedule& incremental_marking_schedule,
+ cppgc::Platform* platform,
+ UnifiedHeapMarkingState& unified_heap_marking_state)
+ : cppgc::internal::ConcurrentMarkerBase(
+ heap, marking_worklists, incremental_marking_schedule, platform),
+ unified_heap_marking_state_(unified_heap_marking_state) {}
+
+ std::unique_ptr<cppgc::Visitor> CreateConcurrentMarkingVisitor(
+ ConcurrentMarkingState&) const final;
+
+ private:
+ UnifiedHeapMarkingState& unified_heap_marking_state_;
+};
+
+std::unique_ptr<cppgc::Visitor>
+UnifiedHeapConcurrentMarker::CreateConcurrentMarkingVisitor(
+ ConcurrentMarkingState& marking_state) const {
+ return std::make_unique<ConcurrentUnifiedHeapMarkingVisitor>(
+ heap(), marking_state, unified_heap_marking_state_);
+}
+
class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
public:
UnifiedHeapMarker(Key, Heap& v8_heap, cppgc::internal::HeapBase& cpp_heap,
@@ -82,8 +113,8 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
}
private:
- UnifiedHeapMarkingState unified_heap_mutator_marking_state_;
- UnifiedHeapMarkingVisitor marking_visitor_;
+ UnifiedHeapMarkingState unified_heap_marking_state_;
+ MutatorUnifiedHeapMarkingVisitor marking_visitor_;
cppgc::internal::ConservativeMarkingVisitor conservative_marking_visitor_;
};
@@ -92,11 +123,15 @@ UnifiedHeapMarker::UnifiedHeapMarker(Key key, Heap& v8_heap,
cppgc::Platform* platform,
MarkingConfig config)
: cppgc::internal::MarkerBase(key, heap, platform, config),
- unified_heap_mutator_marking_state_(v8_heap),
+ unified_heap_marking_state_(v8_heap),
marking_visitor_(heap, mutator_marking_state_,
- unified_heap_mutator_marking_state_),
+ unified_heap_marking_state_),
conservative_marking_visitor_(heap, mutator_marking_state_,
- marking_visitor_) {}
+ marking_visitor_) {
+ concurrent_marker_ = std::make_unique<UnifiedHeapConcurrentMarker>(
+ heap_, marking_worklists_, schedule_, platform_,
+ unified_heap_marking_state_);
+}
void UnifiedHeapMarker::AddObject(void* object) {
mutator_marking_state_.MarkAndPush(
@@ -105,13 +140,22 @@ void UnifiedHeapMarker::AddObject(void* object) {
} // namespace
-CppHeap::CppHeap(v8::Isolate* isolate, size_t custom_spaces)
+CppHeap::CppHeap(
+ v8::Isolate* isolate,
+ const std::vector<std::unique_ptr<cppgc::CustomSpaceBase>>& custom_spaces)
: cppgc::internal::HeapBase(std::make_shared<CppgcPlatformAdapter>(isolate),
custom_spaces,
cppgc::internal::HeapBase::StackSupport::
kSupportsConservativeStackScan),
isolate_(*reinterpret_cast<Isolate*>(isolate)) {
CHECK(!FLAG_incremental_marking_wrappers);
+ isolate_.heap_profiler()->AddBuildEmbedderGraphCallback(&CppGraphBuilder::Run,
+ this);
+}
+
+CppHeap::~CppHeap() {
+ isolate_.heap_profiler()->RemoveBuildEmbedderGraphCallback(
+ &CppGraphBuilder::Run, this);
}
void CppHeap::RegisterV8References(
@@ -126,10 +170,19 @@ void CppHeap::RegisterV8References(
}
void CppHeap::TracePrologue(TraceFlags flags) {
+ // Finish sweeping in case it is still running.
+ sweeper_.FinishIfRunning();
+
const UnifiedHeapMarker::MarkingConfig marking_config{
UnifiedHeapMarker::MarkingConfig::CollectionType::kMajor,
cppgc::Heap::StackState::kNoHeapPointers,
- UnifiedHeapMarker::MarkingConfig::MarkingType::kIncremental};
+ UnifiedHeapMarker::MarkingConfig::MarkingType::kIncrementalAndConcurrent};
+ if ((flags == TraceFlags::kReduceMemory) || (flags == TraceFlags::kForced)) {
+ // Only enable compaction when in a memory reduction garbage collection as
+ // it may significantly increase the final garbage collection pause.
+ compactor_.InitializeIfShouldCompact(marking_config.marking_type,
+ marking_config.stack_state);
+ }
marker_ =
cppgc::internal::MarkerFactory::CreateAndStartMarking<UnifiedHeapMarker>(
*isolate_.heap(), AsBase(), platform_.get(), marking_config);
@@ -147,27 +200,39 @@ bool CppHeap::AdvanceTracing(double deadline_in_ms) {
bool CppHeap::IsTracingDone() { return marking_done_; }
void CppHeap::EnterFinalPause(EmbedderStackState stack_state) {
- marker_->EnterAtomicPause(cppgc::Heap::StackState::kNoHeapPointers);
+ marker_->EnterAtomicPause(stack_state);
+ if (compactor_.CancelIfShouldNotCompact(
+ UnifiedHeapMarker::MarkingConfig::MarkingType::kAtomic,
+ stack_state)) {
+ marker_->NotifyCompactionCancelled();
+ }
}
void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
CHECK(marking_done_);
- marker_->LeaveAtomicPause();
{
- // Pre finalizers are forbidden from allocating objects
+ // Weakness callbacks and pre-finalizers are forbidden from allocating
+ // objects.
cppgc::internal::ObjectAllocator::NoAllocationScope no_allocation_scope_(
object_allocator_);
- marker()->ProcessWeakness();
+ marker_->LeaveAtomicPause();
prefinalizer_handler()->InvokePreFinalizers();
}
marker_.reset();
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
- VerifyMarking(cppgc::Heap::StackState::kNoHeapPointers);
+ UnifiedHeapMarkingVerifier verifier(*this);
+ verifier.Run(cppgc::Heap::StackState::kNoHeapPointers);
#endif
+ cppgc::internal::Sweeper::SweepingConfig::CompactableSpaceHandling
+ compactable_space_handling = compactor_.CompactSpacesIfEnabled();
{
NoGCScope no_gc(*this);
- sweeper().Start(cppgc::internal::Sweeper::Config::kAtomic);
+ const cppgc::internal::Sweeper::SweepingConfig sweeping_config{
+ cppgc::internal::Sweeper::SweepingConfig::SweepingType::
+ kIncrementalAndConcurrent,
+ compactable_space_handling};
+ sweeper().Start(sweeping_config);
}
}
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.h b/deps/v8/src/heap/cppgc-js/cpp-heap.h
index f3bbab8b16..b2bfc6f8a7 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.h
@@ -19,7 +19,13 @@ namespace internal {
class V8_EXPORT_PRIVATE CppHeap final : public cppgc::internal::HeapBase,
public v8::EmbedderHeapTracer {
public:
- CppHeap(v8::Isolate* isolate, size_t custom_spaces);
+ CppHeap(v8::Isolate* isolate,
+ const std::vector<std::unique_ptr<cppgc::CustomSpaceBase>>&
+ custom_spaces);
+ ~CppHeap() final;
+
+ CppHeap(const CppHeap&) = delete;
+ CppHeap& operator=(const CppHeap&) = delete;
HeapBase& AsBase() { return *this; }
const HeapBase& AsBase() const { return *this; }
diff --git a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
new file mode 100644
index 0000000000..b1784baa9f
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -0,0 +1,713 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc-js/cpp-snapshot.h"
+
+#include <memory>
+
+#include "include/cppgc/trace-trait.h"
+#include "include/v8-cppgc.h"
+#include "include/v8-profiler.h"
+#include "src/api/api-inl.h"
+#include "src/base/logging.h"
+#include "src/execution/isolate.h"
+#include "src/heap/cppgc-js/cpp-heap.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-visitor.h"
+#include "src/heap/embedder-tracing.h"
+#include "src/heap/mark-compact.h"
+#include "src/objects/js-objects.h"
+#include "src/profiler/heap-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+class CppGraphBuilderImpl;
+class StateStorage;
+class State;
+
+using cppgc::internal::GCInfo;
+using cppgc::internal::GlobalGCInfoTable;
+using cppgc::internal::HeapObjectHeader;
+
+// Node representing a C++ object on the heap.
+class EmbedderNode : public v8::EmbedderGraph::Node {
+ public:
+ explicit EmbedderNode(const char* name) : name_(name) {}
+ ~EmbedderNode() override = default;
+
+ const char* Name() final { return name_; }
+ size_t SizeInBytes() override { return 0; }
+
+ void SetWrapperNode(v8::EmbedderGraph::Node* wrapper_node) {
+ wrapper_node_ = wrapper_node;
+ }
+ Node* WrapperNode() final { return wrapper_node_; }
+
+ void SetDetachedness(Detachedness detachedness) {
+ detachedness_ = detachedness;
+ }
+ Detachedness GetDetachedness() final { return detachedness_; }
+
+ private:
+ const char* name_;
+ Node* wrapper_node_ = nullptr;
+ Detachedness detachedness_ = Detachedness::kUnknown;
+};
+
+// Node representing an artificial root group, e.g., set of Persistent handles.
+class EmbedderRootNode final : public EmbedderNode {
+ public:
+ explicit EmbedderRootNode(const char* name) : EmbedderNode(name) {}
+ ~EmbedderRootNode() final = default;
+
+ bool IsRootNode() final { return true; }
+ size_t SizeInBytes() final { return 0; }
+};
+
+// Canonical state representing real and artificial (e.g. root) objects.
+class StateBase {
+ public:
+ // Objects are either hidden or visible, or their visibility depends on
+ // other nodes of the same SCC that are still being traversed.
+ enum class Visibility {
+ kHidden,
+ kDependentVisibility,
+ kVisible,
+ };
+
+ StateBase(const void* key, size_t state_count, Visibility visibility,
+ EmbedderNode* node, bool visited)
+ : key_(key),
+ state_count_(state_count),
+ visibility_(visibility),
+ node_(node),
+ visited_(visited) {
+ DCHECK_NE(Visibility::kDependentVisibility, visibility);
+ }
+
+ // Visited objects have already been processed or are currently being
+ // processed; see also IsPending() below.
+ bool IsVisited() const { return visited_; }
+
+ // Pending objects are currently being processed as part of the same SCC.
+ bool IsPending() const { return pending_; }
+
+ bool IsVisibleNotDependent() {
+ auto v = GetVisibility();
+ CHECK_NE(Visibility::kDependentVisibility, v);
+ return v == Visibility::kVisible;
+ }
+
+ void set_node(EmbedderNode* node) {
+ CHECK_EQ(Visibility::kVisible, GetVisibility());
+ node_ = node;
+ }
+
+ EmbedderNode* get_node() {
+ CHECK_EQ(Visibility::kVisible, GetVisibility());
+ return node_;
+ }
+
+ protected:
+ const void* key_;
+ // The state count keeps track of node processing order. It is used to
+ // create dependencies only on ancestors in the subgraph, which ensures
+ // that there will be no cycles in the dependencies.
+ const size_t state_count_;
+
+ Visibility visibility_;
+ StateBase* visibility_dependency_ = nullptr;
+ EmbedderNode* node_;
+ bool visited_;
+ bool pending_ = false;
+
+ Visibility GetVisibility() {
+ FollowDependencies();
+ return visibility_;
+ }
+
+ StateBase* FollowDependencies() {
+ if (visibility_ != Visibility::kDependentVisibility) {
+ CHECK_NULL(visibility_dependency_);
+ return this;
+ }
+ StateBase* current = this;
+ std::vector<StateBase*> dependencies;
+ while (current->visibility_dependency_ &&
+ current->visibility_dependency_ != current) {
+ DCHECK_EQ(Visibility::kDependentVisibility, current->visibility_);
+ dependencies.push_back(current);
+ current = current->visibility_dependency_;
+ }
+ auto new_visibility = Visibility::kDependentVisibility;
+ auto* new_visibility_dependency = current;
+ if (current->visibility_ == Visibility::kVisible) {
+ new_visibility = Visibility::kVisible;
+ new_visibility_dependency = nullptr;
+ } else if (!IsPending()) {
+ DCHECK(IsVisited());
+ // The object was not visible (above case). Having a dependency on itself
+ // or null means no visible object was found.
+ new_visibility = Visibility::kHidden;
+ new_visibility_dependency = nullptr;
+ }
+ current->visibility_ = new_visibility;
+ current->visibility_dependency_ = new_visibility_dependency;
+ for (auto* state : dependencies) {
+ state->visibility_ = new_visibility;
+ state->visibility_dependency_ = new_visibility_dependency;
+ }
+ return current;
+ }
+
+ friend class State;
+};
+
+class State final : public StateBase {
+ public:
+ State(const HeapObjectHeader& header, size_t state_count)
+ : StateBase(&header, state_count, Visibility::kHidden, nullptr, false) {}
+
+ const HeapObjectHeader* header() const {
+ return static_cast<const HeapObjectHeader*>(key_);
+ }
+
+ void MarkVisited() { visited_ = true; }
+
+ void MarkPending() { pending_ = true; }
+ void UnmarkPending() { pending_ = false; }
+
+ void MarkVisible() {
+ visibility_ = Visibility::kVisible;
+ visibility_dependency_ = nullptr;
+ }
+
+ void MarkDependentVisibility(StateBase* dependency) {
+ // Follow and update dependencies as much as possible.
+ dependency = dependency->FollowDependencies();
+ DCHECK(dependency->IsVisited());
+ if (visibility_ == StateBase::Visibility::kVisible) {
+ // Already visible, no dependency needed.
+ DCHECK_NULL(visibility_dependency_);
+ return;
+ }
+ if (dependency->visibility_ == Visibility::kVisible) {
+ // Simple case: Dependency is visible.
+ visibility_ = Visibility::kVisible;
+ visibility_dependency_ = nullptr;
+ return;
+ }
+ if ((visibility_dependency_ &&
+ (visibility_dependency_->state_count_ > dependency->state_count_)) ||
+ (!visibility_dependency_ &&
+ (state_count_ > dependency->state_count_))) {
+ // Only update when the new state_count_ is smaller than the original
+ // state_count_. This ensures that we pick an ancestor as the dependency
+ // and not a child, which guarantees that the resolution converges.
+ //
+ // Dependency is now
+ // a) either pending with unknown visibility (same call chain), or
+ // b) not pending and has defined visibility.
+ //
+ // It's not possible to point to a state that is not pending but has
+ // dependent visibility because dependencies are updated to the top-most
+ // dependency at the beginning of the method.
+ if (dependency->IsPending()) {
+ visibility_ = Visibility::kDependentVisibility;
+ visibility_dependency_ = dependency;
+ } else {
+ CHECK_NE(Visibility::kDependentVisibility, dependency->visibility_);
+ if (dependency->visibility_ == Visibility::kVisible) {
+ visibility_ = Visibility::kVisible;
+ visibility_dependency_ = nullptr;
+ }
+ }
+ }
+ }
+};
+
+// Root states are similar to regular states with the difference that they can
+// have named edges (source location of the root) that aid debugging.
+class RootState final : public StateBase {
+ public:
+ RootState(EmbedderRootNode* node, size_t state_count)
+ // Root states are always visited, visible, and have a node attached.
+ : StateBase(node, state_count, Visibility::kVisible, node, true) {}
+
+ void AddNamedEdge(std::unique_ptr<const char> edge_name) {
+ named_edges_.push_back(std::move(edge_name));
+ }
+
+ private:
+ // Edge names are passed to V8 but must be kept alive by the embedder until
+ // the snapshot is compiled.
+ std::vector<std::unique_ptr<const char>> named_edges_;
+};
+
+// Abstraction for storing states. Storage allows for creation and lookup of
+// different state objects.
+class StateStorage final {
+ public:
+ bool StateExists(const void* key) const {
+ return states_.find(key) != states_.end();
+ }
+
+ StateBase& GetExistingState(const void* key) const {
+ CHECK(StateExists(key));
+ return *states_.at(key).get();
+ }
+
+ State& GetExistingState(const HeapObjectHeader& header) const {
+ return static_cast<State&>(GetExistingState(&header));
+ }
+
+ State& GetOrCreateState(const HeapObjectHeader& header) {
+ if (!StateExists(&header)) {
+ auto it = states_.insert(std::make_pair(
+ &header, std::make_unique<State>(header, ++state_count_)));
+ DCHECK(it.second);
+ USE(it);
+ }
+ return GetExistingState(header);
+ }
+
+ RootState& CreateRootState(EmbedderRootNode* root_node) {
+ CHECK(!StateExists(root_node));
+ auto it = states_.insert(std::make_pair(
+ root_node, std::make_unique<RootState>(root_node, ++state_count_)));
+ DCHECK(it.second);
+ USE(it);
+ return static_cast<RootState&>(*it.first->second.get());
+ }
+
+ template <typename Callback>
+ void ForAllVisibleStates(Callback callback) {
+ for (auto& state : states_) {
+ if (state.second->IsVisibleNotDependent()) {
+ callback(state.second.get());
+ }
+ }
+ }
+
+ private:
+ std::unordered_map<const void*, std::unique_ptr<StateBase>> states_;
+ size_t state_count_ = 0;
+};
+
+bool HasEmbedderDataBackref(Isolate* isolate, v8::Local<v8::Value> v8_value,
+ void* expected_backref) {
+ // See LocalEmbedderHeapTracer::VerboseWrapperInfo for details on how
+ // wrapper objects are set up.
+ if (!v8_value->IsObject()) return false;
+
+ Handle<Object> v8_object = Utils::OpenHandle(*v8_value);
+ if (!v8_object->IsJSObject() || !JSObject::cast(*v8_object).IsApiWrapper())
+ return false;
+
+ JSObject js_object = JSObject::cast(*v8_object);
+ return js_object.GetEmbedderFieldCount() >= 2 &&
+ LocalEmbedderHeapTracer::VerboseWrapperInfo(
+ LocalEmbedderHeapTracer::ExtractWrapperInfo(isolate, js_object))
+ .instance() == expected_backref;
+}
+
+// The following implements a snapshotting algorithm for C++ objects that also
+// filters strongly-connected components (SCCs) of only "hidden" objects that
+// are not (transitively) referencing any non-hidden objects.
+//
+// C++ objects come in two versions.
+// a. Named objects that have been assigned a name through NameProvider.
+// b. Unnamed objects, that are potentially hidden if the build configuration
+// b. Unnamed objects that are potentially hidden if the build configuration
+// set to NameProvider::kHiddenName.
+//
+// The main challenge for the algorithm is to avoid blowing up the final object
+// graph with hidden nodes that do not carry information. For that reason, the
+// algorithm filters SCCs of only hidden objects, e.g.:
+// ... -> (object) -> (object) -> (hidden) -> (hidden)
+// In this case the (hidden) objects are filtered from the graph. The trickiest
+// part is maintaining visibility state for objects referencing other objects
+// that are currently being processed.
+//
+// Main algorithm idea (two passes):
+// 1. First pass marks all non-hidden objects and those that transitively reach
+// non-hidden objects as visible. Details:
+// - Iterate over all objects.
+// - If object is non-hidden mark it as visible and also mark parent as
+// visible if needed.
+// - If object is hidden, traverse children as DFS to find non-hidden
+// objects. Post-order process the objects and mark those objects as
+// visible that have child nodes that are visible themselves.
+// - Maintain an epoch counter (StateStorage::state_count_) to allow
+// deferring the visibility decision to other objects in the same SCC. This
+// is similar to the "lowlink" value in Tarjan's algorithm for SCC.
+// - After the first pass it is guaranteed that all deferred visibility
+// decisions can be resolved.
+// 2. Second pass adds nodes and edges for all visible objects.
+// - Upon first checking the visibility state of an object, all deferred
+// visibility states are resolved.
+//
+// For practical reasons, the recursion is transformed into an iteration. We do
+// not use plain Tarjan's algorithm to avoid another pass over all nodes to
+// create SCCs.
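
As a simplified model of pass 1 described above, visibility can also be computed as reverse reachability: an object is visible iff it is named or can reach a named object, so walking reversed edges from every named node marks exactly the visible set and leaves hidden-only SCCs untouched. The sketch below (hypothetical types, not V8's state machinery) shows that equivalence; the real implementation instead resolves visibility during a single forward traversal with deferred, lowlink-style dependencies to avoid building a reversed graph.

#include <cstddef>
#include <vector>

// Hypothetical input: a graph of C++ objects, where is_named[i] tells whether
// object i carries a real (non-hidden) name.
struct ObjectGraph {
  std::vector<bool> is_named;
  std::vector<std::vector<size_t>> successors;  // Outgoing edges per object.
};

// Pass-1 equivalent: an object is visible iff it is named or transitively
// reaches a named object.
std::vector<bool> ComputeVisibility(const ObjectGraph& graph) {
  const size_t n = graph.is_named.size();
  // Build reversed edges so that "reaches a named object" becomes "is
  // reachable from a named object over reversed edges".
  std::vector<std::vector<size_t>> predecessors(n);
  for (size_t from = 0; from < n; ++from) {
    for (size_t to : graph.successors[from]) predecessors[to].push_back(from);
  }
  std::vector<bool> visible(n, false);
  std::vector<size_t> worklist;
  for (size_t i = 0; i < n; ++i) {
    if (graph.is_named[i]) {
      visible[i] = true;
      worklist.push_back(i);
    }
  }
  // Every predecessor of a visible object is itself visible; SCCs consisting
  // only of hidden objects are never enqueued and therefore stay hidden.
  while (!worklist.empty()) {
    const size_t node = worklist.back();
    worklist.pop_back();
    for (size_t pred : predecessors[node]) {
      if (!visible[pred]) {
        visible[pred] = true;
        worklist.push_back(pred);
      }
    }
  }
  return visible;
}
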
+class CppGraphBuilderImpl final {
+ public:
+ CppGraphBuilderImpl(CppHeap& cpp_heap, v8::EmbedderGraph& graph)
+ : cpp_heap_(cpp_heap), graph_(graph) {}
+
+ void Run();
+
+ void VisitForVisibility(State* parent, const HeapObjectHeader&);
+ void VisitForVisibility(State& parent, const TracedReferenceBase&);
+ void VisitRootForGraphBuilding(RootState&, const HeapObjectHeader&,
+ const cppgc::SourceLocation&);
+ void ProcessPendingObjects();
+
+ EmbedderRootNode* AddRootNode(const char* name) {
+ return static_cast<EmbedderRootNode*>(graph_.AddNode(
+ std::unique_ptr<v8::EmbedderGraph::Node>{new EmbedderRootNode(name)}));
+ }
+
+ EmbedderNode* AddNode(const HeapObjectHeader& header) {
+ return static_cast<EmbedderNode*>(
+ graph_.AddNode(std::unique_ptr<v8::EmbedderGraph::Node>{
+ new EmbedderNode(header.GetName().value)}));
+ }
+
+ void AddEdge(State& parent, const HeapObjectHeader& header) {
+ DCHECK(parent.IsVisibleNotDependent());
+ auto& current = states_.GetExistingState(header);
+ if (!current.IsVisibleNotDependent()) return;
+
+ // Both states are visible. Create nodes in case this is the first edge
+ // created for any of them.
+ if (!parent.get_node()) {
+ parent.set_node(AddNode(*parent.header()));
+ }
+ if (!current.get_node()) {
+ current.set_node(AddNode(header));
+ }
+ graph_.AddEdge(parent.get_node(), current.get_node());
+ }
+
+ void AddEdge(State& parent, const TracedReferenceBase& ref) {
+ DCHECK(parent.IsVisibleNotDependent());
+ v8::Local<v8::Value> v8_value = ref.Get(cpp_heap_.isolate());
+ if (!v8_value.IsEmpty()) {
+ if (!parent.get_node()) {
+ parent.set_node(AddNode(*parent.header()));
+ }
+ auto* v8_node = graph_.V8Node(v8_value);
+ graph_.AddEdge(parent.get_node(), v8_node);
+
+ // References that have a class id set may have their internal fields
+ // pointing back to the object. Set up a wrapper node for the graph so
+ // that the snapshot generator can merge the nodes appropriately.
+ if (!ref.WrapperClassId()) return;
+
+ if (HasEmbedderDataBackref(
+ reinterpret_cast<v8::internal::Isolate*>(cpp_heap_.isolate()),
+ v8_value, parent.header()->Payload())) {
+ parent.get_node()->SetWrapperNode(v8_node);
+
+ auto* profiler =
+ reinterpret_cast<Isolate*>(cpp_heap_.isolate())->heap_profiler();
+ if (profiler->HasGetDetachednessCallback()) {
+ parent.get_node()->SetDetachedness(
+ profiler->GetDetachedness(v8_value, ref.WrapperClassId()));
+ }
+ }
+ }
+ }
+
+ void AddRootEdge(RootState& root, State& child, std::string edge_name) {
+ DCHECK(root.IsVisibleNotDependent());
+ if (!child.IsVisibleNotDependent()) return;
+
+ // Root states always have a node set.
+ DCHECK_NOT_NULL(root.get_node());
+ if (!child.get_node()) {
+ child.set_node(AddNode(*child.header()));
+ }
+
+ if (!edge_name.empty()) {
+ // V8's API is based on raw C strings. Allocate and temporarily keep the
+ // edge name alive from the corresponding node.
+ const size_t len = edge_name.length();
+ char* raw_location_string = new char[len + 1];
+ strncpy(raw_location_string, edge_name.c_str(), len);
+ raw_location_string[len] = 0;
+ std::unique_ptr<const char> holder(raw_location_string);
+ graph_.AddEdge(root.get_node(), child.get_node(), holder.get());
+ root.AddNamedEdge(std::move(holder));
+ return;
+ }
+ graph_.AddEdge(root.get_node(), child.get_node());
+ }
+
+ private:
+ class WorkstackItemBase;
+ class VisitationItem;
+ class VisitationDoneItem;
+
+ CppHeap& cpp_heap_;
+ v8::EmbedderGraph& graph_;
+ StateStorage states_;
+ std::vector<std::unique_ptr<WorkstackItemBase>> workstack_;
+};
+
+// Iterates over live objects to mark them as visible if needed.
+class LiveObjectsForVisibilityIterator final
+ : public cppgc::internal::HeapVisitor<LiveObjectsForVisibilityIterator> {
+ friend class cppgc::internal::HeapVisitor<LiveObjectsForVisibilityIterator>;
+
+ public:
+ explicit LiveObjectsForVisibilityIterator(CppGraphBuilderImpl& graph_builder)
+ : graph_builder_(graph_builder) {}
+
+ private:
+ bool VisitHeapObjectHeader(HeapObjectHeader* header) {
+ if (header->IsFree()) return true;
+ graph_builder_.VisitForVisibility(nullptr, *header);
+ graph_builder_.ProcessPendingObjects();
+ return true;
+ }
+
+ CppGraphBuilderImpl& graph_builder_;
+};
+
+class ParentScope final {
+ public:
+ explicit ParentScope(StateBase& parent) : parent_(parent) {}
+
+ RootState& ParentAsRootState() const {
+ return static_cast<RootState&>(parent_);
+ }
+ State& ParentAsRegularState() const { return static_cast<State&>(parent_); }
+
+ private:
+ StateBase& parent_;
+};
+
+class VisiblityVisitor final : public JSVisitor {
+ public:
+ explicit VisiblityVisitor(CppGraphBuilderImpl& graph_builder,
+ const ParentScope& parent_scope)
+ : JSVisitor(cppgc::internal::VisitorFactory::CreateKey()),
+ graph_builder_(graph_builder),
+ parent_scope_(parent_scope) {}
+
+ // C++ handling.
+ void Visit(const void*, cppgc::TraceDescriptor desc) final {
+ graph_builder_.VisitForVisibility(
+ &parent_scope_.ParentAsRegularState(),
+ HeapObjectHeader::FromPayload(desc.base_object_payload));
+ }
+ void VisitRoot(const void*, cppgc::TraceDescriptor,
+ const cppgc::SourceLocation&) final {}
+ void VisitWeakRoot(const void*, cppgc::TraceDescriptor, cppgc::WeakCallback,
+ const void*, const cppgc::SourceLocation&) final {}
+ void VisitWeakContainer(const void* object,
+ cppgc::TraceDescriptor strong_desc,
+ cppgc::TraceDescriptor weak_desc, cppgc::WeakCallback,
+ const void*) final {
+ if (!weak_desc.callback) {
+ // Weak container does not contribute to liveness.
+ return;
+ }
+ // The heap snapshot is always taken after a GC, so we know there are no
+ // dead entries in the backing store; thus it is safe to trace it strongly.
+ if (object) {
+ Visit(object, strong_desc);
+ }
+ }
+
+ // JS handling.
+ void Visit(const TracedReferenceBase& ref) final {
+ graph_builder_.VisitForVisibility(parent_scope_.ParentAsRegularState(),
+ ref);
+ }
+
+ private:
+ CppGraphBuilderImpl& graph_builder_;
+ const ParentScope& parent_scope_;
+};
+
+class GraphBuildingVisitor final : public JSVisitor {
+ public:
+ GraphBuildingVisitor(CppGraphBuilderImpl& graph_builder,
+ const ParentScope& parent_scope)
+ : JSVisitor(cppgc::internal::VisitorFactory::CreateKey()),
+ graph_builder_(graph_builder),
+ parent_scope_(parent_scope) {}
+
+ // C++ handling.
+ void Visit(const void*, cppgc::TraceDescriptor desc) final {
+ graph_builder_.AddEdge(
+ parent_scope_.ParentAsRegularState(),
+ HeapObjectHeader::FromPayload(desc.base_object_payload));
+ }
+ void VisitRoot(const void*, cppgc::TraceDescriptor desc,
+ const cppgc::SourceLocation& loc) final {
+ graph_builder_.VisitRootForGraphBuilding(
+ parent_scope_.ParentAsRootState(),
+ HeapObjectHeader::FromPayload(desc.base_object_payload), loc);
+ }
+ void VisitWeakRoot(const void*, cppgc::TraceDescriptor, cppgc::WeakCallback,
+ const void*, const cppgc::SourceLocation&) final {}
+ // JS handling.
+ void Visit(const TracedReferenceBase& ref) final {
+ graph_builder_.AddEdge(parent_scope_.ParentAsRegularState(), ref);
+ }
+
+ private:
+ CppGraphBuilderImpl& graph_builder_;
+ const ParentScope& parent_scope_;
+};
+
+// Base class for transforming recursion into iteration. Items are processed
+// in stack (LIFO) order.
+class CppGraphBuilderImpl::WorkstackItemBase {
+ public:
+ WorkstackItemBase(State* parent, State& current)
+ : parent_(parent), current_(current) {}
+
+ virtual ~WorkstackItemBase() = default;
+ virtual void Process(CppGraphBuilderImpl&) = 0;
+
+ protected:
+ State* parent_;
+ State& current_;
+};
+
+void CppGraphBuilderImpl::ProcessPendingObjects() {
+ while (!workstack_.empty()) {
+ std::unique_ptr<WorkstackItemBase> item = std::move(workstack_.back());
+ workstack_.pop_back();
+ item->Process(*this);
+ }
+}
+
+// Post-order processing of an object. It's guaranteed that all children have
+// been processed first.
+class CppGraphBuilderImpl::VisitationDoneItem final : public WorkstackItemBase {
+ public:
+ VisitationDoneItem(State* parent, State& current)
+ : WorkstackItemBase(parent, current) {}
+
+ void Process(CppGraphBuilderImpl& graph_builder) final {
+ CHECK(parent_);
+ parent_->MarkDependentVisibility(&current_);
+ current_.UnmarkPending();
+ }
+};
+
+class CppGraphBuilderImpl::VisitationItem final : public WorkstackItemBase {
+ public:
+ VisitationItem(State* parent, State& current)
+ : WorkstackItemBase(parent, current) {}
+
+ void Process(CppGraphBuilderImpl& graph_builder) final {
+ if (parent_) {
+ // Re-add the same object for post-order processing. This must happen
+ // lazily, as the parent's visibility depends on its children.
+ graph_builder.workstack_.push_back(std::unique_ptr<WorkstackItemBase>{
+ new VisitationDoneItem(parent_, current_)});
+ }
+ ParentScope parent_scope(current_);
+ VisiblityVisitor object_visitor(graph_builder, parent_scope);
+ current_.header()->Trace(&object_visitor);
+ if (!parent_) {
+ current_.UnmarkPending();
+ }
+ }
+};
+
+void CppGraphBuilderImpl::VisitForVisibility(State* parent,
+ const HeapObjectHeader& header) {
+ auto& current = states_.GetOrCreateState(header);
+
+ if (current.IsVisited()) {
+ // Avoid traversing into already visited subgraphs and just update the state
+ // based on a previous result.
+ if (parent) {
+ parent->MarkDependentVisibility(&current);
+ }
+ return;
+ }
+
+ current.MarkVisited();
+ if (header.GetName().name_was_hidden) {
+ current.MarkPending();
+ workstack_.push_back(std::unique_ptr<WorkstackItemBase>{
+ new VisitationItem(parent, current)});
+ } else {
+ // No need to mark/unmark pending as the node is immediately processed.
+ current.MarkVisible();
+ if (parent) {
+ // Eagerly update a parent object as its visibility state is now fixed.
+ parent->MarkVisible();
+ }
+ }
+}
+
+void CppGraphBuilderImpl::VisitForVisibility(State& parent,
+ const TracedReferenceBase& ref) {
+ v8::Local<v8::Value> v8_value = ref.Get(cpp_heap_.isolate());
+ if (!v8_value.IsEmpty()) {
+ parent.MarkVisible();
+ }
+}
+
+void CppGraphBuilderImpl::VisitRootForGraphBuilding(
+ RootState& root, const HeapObjectHeader& header,
+ const cppgc::SourceLocation& loc) {
+ State& current = states_.GetExistingState(header);
+ if (!current.IsVisibleNotDependent()) return;
+
+ AddRootEdge(root, current, loc.ToString());
+}
+
+void CppGraphBuilderImpl::Run() {
+ // Sweeping from a previous GC might still be running, in which case not all
+ // pages have been returned to spaces yet.
+ cpp_heap_.sweeper().FinishIfRunning();
+ // First pass: Figure out which objects should be included in the graph -- see
+ // class-level comment on CppGraphBuilder.
+ LiveObjectsForVisibilityIterator visitor(*this);
+ visitor.Traverse(&cpp_heap_.raw_heap());
+ // Second pass: Add graph nodes for objects that must be shown.
+ states_.ForAllVisibleStates([this](StateBase* state) {
+ ParentScope parent_scope(*state);
+ GraphBuildingVisitor object_visitor(*this, parent_scope);
+ // No roots have been created so far, so all StateBase objects are State.
+ static_cast<State*>(state)->header()->Trace(&object_visitor);
+ });
+ // Add roots.
+ {
+ ParentScope parent_scope(states_.CreateRootState(AddRootNode("C++ roots")));
+ GraphBuildingVisitor object_visitor(*this, parent_scope);
+ cpp_heap_.GetStrongPersistentRegion().Trace(&object_visitor);
+ }
+ {
+ ParentScope parent_scope(
+ states_.CreateRootState(AddRootNode("C++ cross-thread roots")));
+ GraphBuildingVisitor object_visitor(*this, parent_scope);
+ cpp_heap_.GetStrongCrossThreadPersistentRegion().Trace(&object_visitor);
+ }
+}
+
+// static
+void CppGraphBuilder::Run(v8::Isolate* isolate, v8::EmbedderGraph* graph,
+ void* data) {
+ CppHeap* cpp_heap = static_cast<CppHeap*>(data);
+ CHECK_NOT_NULL(cpp_heap);
+ CHECK_NOT_NULL(graph);
+ CppGraphBuilderImpl graph_builder(*cpp_heap, *graph);
+ graph_builder.Run();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/cppgc-js/cpp-snapshot.h b/deps/v8/src/heap/cppgc-js/cpp-snapshot.h
new file mode 100644
index 0000000000..89d1026017
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/cpp-snapshot.h
@@ -0,0 +1,29 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_JS_CPP_SNAPSHOT_H_
+#define V8_HEAP_CPPGC_JS_CPP_SNAPSHOT_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+
+class Isolate;
+class EmbedderGraph;
+
+namespace internal {
+
+class V8_EXPORT_PRIVATE CppGraphBuilder final {
+ public:
+ // Add the C++ snapshot to the existing |graph|. See CppGraphBuilderImpl for
+ // algorithm internals.
+ static void Run(v8::Isolate* isolate, v8::EmbedderGraph* graph, void* data);
+
+ CppGraphBuilder() = delete;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CPPGC_JS_CPP_SNAPSHOT_H_
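Usage sketch (illustrative, not part of the patch): CppGraphBuilder::Run has the
signature of v8::HeapProfiler's BuildEmbedderGraphCallback, so an embedder that
owns a CppHeap could register it with the heap profiler roughly as below. The
function name and the exact registration call are assumptions for illustration,
not code taken from this change.

    // Hypothetical registration of the C++ snapshot provider with the heap
    // profiler; |cpp_heap| is the embedder's CppHeap instance and is passed
    // back to CppGraphBuilder::Run as the opaque |data| argument.
    #include "include/v8-profiler.h"
    #include "src/heap/cppgc-js/cpp-snapshot.h"

    void AttachCppSnapshotProvider(v8::Isolate* isolate, void* cpp_heap) {
      isolate->GetHeapProfiler()->AddBuildEmbedderGraphCallback(
          &v8::internal::CppGraphBuilder::Run, cpp_heap);
    }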
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
index 3ef36de504..1a1da3f278 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
@@ -6,17 +6,16 @@
#define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_H_
#include "include/v8-cppgc.h"
+#include "include/v8.h"
#include "src/heap/heap.h"
namespace v8 {
-class JSMemberBase;
-
namespace internal {
-class JSMemberBaseExtractor {
+class BasicTracedReferenceExtractor {
public:
- static Address* ObjectReference(const JSMemberBase& ref) {
+ static Address* ObjectReference(const TracedReferenceBase& ref) {
return reinterpret_cast<Address*>(ref.val_);
}
};
@@ -28,15 +27,15 @@ class UnifiedHeapMarkingState {
UnifiedHeapMarkingState(const UnifiedHeapMarkingState&) = delete;
UnifiedHeapMarkingState& operator=(const UnifiedHeapMarkingState&) = delete;
- inline void MarkAndPush(const JSMemberBase&);
+ inline void MarkAndPush(const TracedReferenceBase&);
private:
Heap& heap_;
};
-void UnifiedHeapMarkingState::MarkAndPush(const JSMemberBase& ref) {
+void UnifiedHeapMarkingState::MarkAndPush(const TracedReferenceBase& ref) {
heap_.RegisterExternallyReferencedObject(
- JSMemberBaseExtractor::ObjectReference(ref));
+ BasicTracedReferenceExtractor::ObjectReference(ref));
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc
new file mode 100644
index 0000000000..ea14b52048
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc
@@ -0,0 +1,70 @@
+
+
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc-js/unified-heap-marking-verifier.h"
+
+#include "include/v8-cppgc.h"
+#include "src/heap/cppgc/marking-verifier.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class UnifiedHeapVerificationVisitor final : public JSVisitor {
+ public:
+ explicit UnifiedHeapVerificationVisitor(
+ cppgc::internal::VerificationState& state)
+ : JSVisitor(cppgc::internal::VisitorFactory::CreateKey()),
+ state_(state) {}
+
+ void Visit(const void*, cppgc::TraceDescriptor desc) final {
+ state_.VerifyMarked(desc.base_object_payload);
+ }
+
+ void VisitWeak(const void*, cppgc::TraceDescriptor desc, cppgc::WeakCallback,
+ const void*) final {
+    // Weak objects should have been cleared at this point. As a consequence,
+    // all objects found through weak references have to point to live
+    // objects.
+ state_.VerifyMarked(desc.base_object_payload);
+ }
+
+ void VisitWeakContainer(const void* object, cppgc::TraceDescriptor,
+ cppgc::TraceDescriptor weak_desc, cppgc::WeakCallback,
+ const void*) {
+ if (!object) return;
+
+    // The contents of weak containers are themselves found through page
+    // iteration and are treated strongly, just as they are when found through
+    // stack scanning. The verification here only makes sure that the container
+    // itself is properly marked.
+ state_.VerifyMarked(weak_desc.base_object_payload);
+ }
+
+ void Visit(const TracedReferenceBase& ref) final {
+ // TODO(chromium:1056170): Verify V8 object is indeed marked.
+ }
+
+ private:
+ cppgc::internal::VerificationState& state_;
+};
+
+} // namespace
+
+UnifiedHeapMarkingVerifier::UnifiedHeapMarkingVerifier(
+ cppgc::internal::HeapBase& heap_base)
+ : MarkingVerifierBase(
+ heap_base, std::make_unique<UnifiedHeapVerificationVisitor>(state_)) {
+}
+
+void UnifiedHeapMarkingVerifier::SetCurrentParent(
+ const cppgc::internal::HeapObjectHeader* parent) {
+ state_.SetCurrentParent(parent);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h
new file mode 100644
index 0000000000..3a54b4dd32
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_VERIFIER_H_
+#define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_VERIFIER_H_
+
+#include "src/heap/cppgc/marking-verifier.h"
+
+namespace v8 {
+namespace internal {
+
+class V8_EXPORT_PRIVATE UnifiedHeapMarkingVerifier final
+ : public cppgc::internal::MarkingVerifierBase {
+ public:
+ explicit UnifiedHeapMarkingVerifier(cppgc::internal::HeapBase&);
+ ~UnifiedHeapMarkingVerifier() final = default;
+
+ void SetCurrentParent(const cppgc::internal::HeapObjectHeader*) final;
+
+ private:
+ // TODO(chromium:1056170): Use a verification state that can handle JS
+ // references.
+ cppgc::internal::VerificationState state_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_VERIFIER_H_
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
index fc39a7a3dc..e235f8885d 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
@@ -4,6 +4,7 @@
#include "src/heap/cppgc-js/unified-heap-marking-visitor.h"
+#include "include/v8.h"
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-state.h"
@@ -12,46 +13,95 @@
namespace v8 {
namespace internal {
-UnifiedHeapMarkingVisitor::UnifiedHeapMarkingVisitor(
- HeapBase& heap, MarkingState& marking_state,
+UnifiedHeapMarkingVisitorBase::UnifiedHeapMarkingVisitorBase(
+ HeapBase& heap, MarkingStateBase& marking_state,
UnifiedHeapMarkingState& unified_heap_marking_state)
: JSVisitor(cppgc::internal::VisitorFactory::CreateKey()),
marking_state_(marking_state),
unified_heap_marking_state_(unified_heap_marking_state) {}
-void UnifiedHeapMarkingVisitor::Visit(const void* object,
- TraceDescriptor desc) {
+void UnifiedHeapMarkingVisitorBase::Visit(const void* object,
+ TraceDescriptor desc) {
marking_state_.MarkAndPush(object, desc);
}
-void UnifiedHeapMarkingVisitor::VisitWeak(const void* object,
- TraceDescriptor desc,
- WeakCallback weak_callback,
- const void* weak_member) {
+void UnifiedHeapMarkingVisitorBase::VisitWeak(const void* object,
+ TraceDescriptor desc,
+ WeakCallback weak_callback,
+ const void* weak_member) {
marking_state_.RegisterWeakReferenceIfNeeded(object, desc, weak_callback,
weak_member);
}
-void UnifiedHeapMarkingVisitor::VisitRoot(const void* object,
- TraceDescriptor desc) {
- Visit(object, desc);
+void UnifiedHeapMarkingVisitorBase::VisitEphemeron(const void* key,
+ TraceDescriptor value_desc) {
+ marking_state_.ProcessEphemeron(key, value_desc);
}
-void UnifiedHeapMarkingVisitor::VisitWeakRoot(const void* object,
- TraceDescriptor desc,
- WeakCallback weak_callback,
- const void* weak_root) {
- marking_state_.InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback,
- weak_root);
+void UnifiedHeapMarkingVisitorBase::VisitWeakContainer(
+ const void* self, TraceDescriptor strong_desc, TraceDescriptor weak_desc,
+ WeakCallback callback, const void* data) {
+ marking_state_.ProcessWeakContainer(self, weak_desc, callback, data);
}
-void UnifiedHeapMarkingVisitor::RegisterWeakCallback(WeakCallback callback,
- const void* object) {
+void UnifiedHeapMarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
+ const void* object) {
marking_state_.RegisterWeakCallback(callback, object);
}
-void UnifiedHeapMarkingVisitor::Visit(const internal::JSMemberBase& ref) {
- unified_heap_marking_state_.MarkAndPush(ref);
+void UnifiedHeapMarkingVisitorBase::HandleMovableReference(const void** slot) {
+ marking_state_.RegisterMovableReference(slot);
+}
+
+namespace {
+void DeferredTraceTracedReference(cppgc::Visitor* visitor, const void* ref) {
+ static_cast<JSVisitor*>(visitor)->Trace(
+ *static_cast<const TracedReferenceBase*>(ref));
+}
+} // namespace
+
+void UnifiedHeapMarkingVisitorBase::Visit(const TracedReferenceBase& ref) {
+ bool should_defer_tracing = DeferTraceToMutatorThreadIfConcurrent(
+ &ref, DeferredTraceTracedReference, 0);
+
+ if (!should_defer_tracing) unified_heap_marking_state_.MarkAndPush(ref);
+}
+
+MutatorUnifiedHeapMarkingVisitor::MutatorUnifiedHeapMarkingVisitor(
+ HeapBase& heap, MutatorMarkingState& marking_state,
+ UnifiedHeapMarkingState& unified_heap_marking_state)
+ : UnifiedHeapMarkingVisitorBase(heap, marking_state,
+ unified_heap_marking_state) {}
+
+void MutatorUnifiedHeapMarkingVisitor::VisitRoot(const void* object,
+ TraceDescriptor desc,
+ const SourceLocation&) {
+ this->Visit(object, desc);
+}
+
+void MutatorUnifiedHeapMarkingVisitor::VisitWeakRoot(const void* object,
+ TraceDescriptor desc,
+ WeakCallback weak_callback,
+ const void* weak_root,
+ const SourceLocation&) {
+ static_cast<MutatorMarkingState&>(marking_state_)
+ .InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback, weak_root);
+}
+
+ConcurrentUnifiedHeapMarkingVisitor::ConcurrentUnifiedHeapMarkingVisitor(
+ HeapBase& heap, ConcurrentMarkingState& marking_state,
+ UnifiedHeapMarkingState& unified_heap_marking_state)
+ : UnifiedHeapMarkingVisitorBase(heap, marking_state,
+ unified_heap_marking_state) {}
+
+bool ConcurrentUnifiedHeapMarkingVisitor::DeferTraceToMutatorThreadIfConcurrent(
+ const void* parameter, cppgc::TraceCallback callback,
+ size_t deferred_size) {
+ marking_state_.concurrent_marking_bailout_worklist().Push(
+ {parameter, callback, deferred_size});
+ static_cast<ConcurrentMarkingState&>(marking_state_)
+ .AccountDeferredMarkedBytes(deferred_size);
+ return true;
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
index f80b86c1be..05e3affaa8 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
@@ -12,40 +12,84 @@
#include "src/heap/cppgc/marking-visitor.h"
namespace cppgc {
+
+class SourceLocation;
+
namespace internal {
-class MarkingState;
+class ConcurrentMarkingState;
+class MarkingStateBase;
+class MutatorMarkingState;
} // namespace internal
} // namespace cppgc
namespace v8 {
namespace internal {
+using cppgc::SourceLocation;
using cppgc::TraceDescriptor;
using cppgc::WeakCallback;
+using cppgc::internal::ConcurrentMarkingState;
using cppgc::internal::HeapBase;
-using cppgc::internal::MarkingState;
+using cppgc::internal::MarkingStateBase;
+using cppgc::internal::MutatorMarkingState;
-class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitor : public JSVisitor {
+class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitorBase : public JSVisitor {
public:
- UnifiedHeapMarkingVisitor(HeapBase&, MarkingState&, UnifiedHeapMarkingState&);
- ~UnifiedHeapMarkingVisitor() override = default;
+ UnifiedHeapMarkingVisitorBase(HeapBase&, MarkingStateBase&,
+ UnifiedHeapMarkingState&);
+ ~UnifiedHeapMarkingVisitorBase() override = default;
- private:
+ protected:
// C++ handling.
void Visit(const void*, TraceDescriptor) final;
void VisitWeak(const void*, TraceDescriptor, WeakCallback, const void*) final;
- void VisitRoot(const void*, TraceDescriptor) final;
- void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
- const void*) final;
+ void VisitEphemeron(const void*, TraceDescriptor) final;
+ void VisitWeakContainer(const void* self, TraceDescriptor strong_desc,
+ TraceDescriptor weak_desc, WeakCallback callback,
+ const void* data) final;
void RegisterWeakCallback(WeakCallback, const void*) final;
+ void HandleMovableReference(const void**) final;
// JS handling.
- void Visit(const internal::JSMemberBase& ref) final;
+ void Visit(const TracedReferenceBase& ref) final;
- MarkingState& marking_state_;
+ MarkingStateBase& marking_state_;
UnifiedHeapMarkingState& unified_heap_marking_state_;
};
+class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor final
+ : public UnifiedHeapMarkingVisitorBase {
+ public:
+ MutatorUnifiedHeapMarkingVisitor(HeapBase&, MutatorMarkingState&,
+ UnifiedHeapMarkingState&);
+ ~MutatorUnifiedHeapMarkingVisitor() override = default;
+
+ protected:
+ void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) final;
+ void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback, const void*,
+ const SourceLocation&) final;
+};
+
+class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor final
+ : public UnifiedHeapMarkingVisitorBase {
+ public:
+ ConcurrentUnifiedHeapMarkingVisitor(HeapBase&, ConcurrentMarkingState&,
+ UnifiedHeapMarkingState&);
+ ~ConcurrentUnifiedHeapMarkingVisitor() override = default;
+
+ protected:
+ void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) final {
+ UNREACHABLE();
+ }
+ void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback, const void*,
+ const SourceLocation&) final {
+ UNREACHABLE();
+ }
+
+ bool DeferTraceToMutatorThreadIfConcurrent(const void*, cppgc::TraceCallback,
+ size_t) final;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/cppgc/compaction-worklists.cc b/deps/v8/src/heap/cppgc/compaction-worklists.cc
new file mode 100644
index 0000000000..bb182a58c8
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/compaction-worklists.cc
@@ -0,0 +1,14 @@
+
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/compaction-worklists.h"
+
+namespace cppgc {
+namespace internal {
+
+void CompactionWorklists::ClearForTesting() { movable_slots_worklist_.Clear(); }
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/compaction-worklists.h b/deps/v8/src/heap/cppgc/compaction-worklists.h
new file mode 100644
index 0000000000..6222bd9a92
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/compaction-worklists.h
@@ -0,0 +1,35 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_COMPACTION_WORKLISTS_H_
+#define V8_HEAP_CPPGC_COMPACTION_WORKLISTS_H_
+
+#include <unordered_set>
+
+#include "src/heap/base/worklist.h"
+
+namespace cppgc {
+namespace internal {
+
+class CompactionWorklists {
+ public:
+ using MovableReference = const void*;
+
+ using MovableReferencesWorklist =
+ heap::base::Worklist<MovableReference*, 256 /* local entries */>;
+
+ MovableReferencesWorklist* movable_slots_worklist() {
+ return &movable_slots_worklist_;
+ }
+
+ void ClearForTesting();
+
+ private:
+ MovableReferencesWorklist movable_slots_worklist_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_COMPACTION_WORKLISTS_H_
diff --git a/deps/v8/src/heap/cppgc/compactor.cc b/deps/v8/src/heap/cppgc/compactor.cc
new file mode 100644
index 0000000000..23869d2f14
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/compactor.cc
@@ -0,0 +1,505 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/compactor.h"
+
+#include <map>
+#include <numeric>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "include/cppgc/macros.h"
+#include "src/heap/cppgc/compaction-worklists.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/raw-heap.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+// Freelist size threshold that must be exceeded before compaction
+// should be considered.
+static constexpr size_t kFreeListSizeThreshold = 512 * kKB;
+
+// The real worker behind heap compaction, recording references to movable
+// objects ("slots"). When the objects end up being compacted and moved,
+// Relocate() will adjust the slots to point to the new location of the
+// object and handle references to interior pointers.
+//
+// The MovableReferences object is created and maintained for the lifetime
+// of one heap compaction-enhanced GC.
+class MovableReferences final {
+ using MovableReference = CompactionWorklists::MovableReference;
+
+ public:
+ explicit MovableReferences(HeapBase& heap) : heap_(heap) {}
+
+ // Adds a slot for compaction. Filters slots in dead objects.
+ void AddOrFilter(MovableReference*);
+
+ // Relocates a backing store |from| -> |to|.
+ void Relocate(Address from, Address to);
+
+ // Relocates interior slots in a backing store that is moved |from| -> |to|.
+ void RelocateInteriorReferences(Address from, Address to, size_t size);
+
+  // Updates the collection of callbacks from the items pushed to the worklist
+  // by marking visitors.
+ void UpdateCallbacks();
+
+ private:
+ HeapBase& heap_;
+
+  // Map from movable reference (value) to its slot. Upon moving an object, the
+  // slot pointing to it requires updating. A movable reference should
+  // currently have only a single slot registered for it.
+ std::unordered_map<MovableReference, MovableReference*> movable_references_;
+
+ // Map of interior slots to their final location. Needs to be an ordered map
+ // as it is used to walk through slots starting at a given memory address.
+ // Requires log(n) lookup to make the early bailout reasonably fast.
+ //
+ // - The initial value for a given key is nullptr.
+ // - Upon moving an object this value is adjusted accordingly.
+ std::map<MovableReference*, Address> interior_movable_references_;
+
+#if DEBUG
+  // The following two collections are used to allow referring back from a slot
+  // to an already moved object.
+ std::unordered_set<const void*> moved_objects_;
+ std::unordered_map<MovableReference*, MovableReference>
+ interior_slot_to_object_;
+#endif // DEBUG
+};
+
+void MovableReferences::AddOrFilter(MovableReference* slot) {
+ const BasePage* slot_page = BasePage::FromInnerAddress(&heap_, slot);
+ CHECK_NOT_NULL(slot_page);
+
+ const void* value = *slot;
+ if (!value) return;
+
+ // All slots and values are part of Oilpan's heap.
+  // - Slots may be contained within dead objects if, e.g., the write barrier
+  //   registered the slot while the backing itself had not been marked live in
+  //   time. Slots in dead objects are filtered below.
+ // - Values may only be contained in or point to live objects.
+
+ const HeapObjectHeader& slot_header =
+ slot_page->ObjectHeaderFromInnerAddress(slot);
+ // Filter the slot since the object that contains the slot is dead.
+ if (!slot_header.IsMarked()) return;
+
+ const BasePage* value_page = BasePage::FromInnerAddress(&heap_, value);
+ CHECK_NOT_NULL(value_page);
+
+ // The following cases are not compacted and do not require recording:
+ // - Compactable object on large pages.
+ // - Compactable object on non-compactable spaces.
+ if (value_page->is_large() || !value_page->space()->is_compactable()) return;
+
+ // Slots must reside in and values must point to live objects at this
+ // point. |value| usually points to a separate object but can also point
+  // to an interior pointer in the same object storage, which is why the
+ // dynamic header lookup is required.
+ const HeapObjectHeader& value_header =
+ value_page->ObjectHeaderFromInnerAddress(value);
+ CHECK(value_header.IsMarked());
+
+ // Slots may have been recorded already but must point to the same value.
+ auto reference_it = movable_references_.find(value);
+ if (V8_UNLIKELY(reference_it != movable_references_.end())) {
+ CHECK_EQ(slot, reference_it->second);
+ return;
+ }
+
+ // Add regular movable reference.
+ movable_references_.emplace(value, slot);
+
+ // Check whether the slot itself resides on a page that is compacted.
+ if (V8_LIKELY(!slot_page->space()->is_compactable())) return;
+
+ CHECK_EQ(interior_movable_references_.end(),
+ interior_movable_references_.find(slot));
+ interior_movable_references_.emplace(slot, nullptr);
+#if DEBUG
+ interior_slot_to_object_.emplace(slot, slot_header.Payload());
+#endif // DEBUG
+}
+
+void MovableReferences::Relocate(Address from, Address to) {
+#if DEBUG
+ moved_objects_.insert(from);
+#endif // DEBUG
+
+ // Interior slots always need to be processed for moved objects.
+ // Consider an object A with slot A.x pointing to value B where A is
+ // allocated on a movable page itself. When B is finally moved, it needs to
+ // find the corresponding slot A.x. Object A may be moved already and the
+ // memory may have been freed, which would result in a crash.
+ if (!interior_movable_references_.empty()) {
+ const HeapObjectHeader& header = HeapObjectHeader::FromPayload(to);
+ const size_t size = header.GetSize() - sizeof(HeapObjectHeader);
+ RelocateInteriorReferences(from, to, size);
+ }
+
+ auto it = movable_references_.find(from);
+ // This means that there is no corresponding slot for a live object.
+  // This may happen when a mutator changes the slot to point to a different
+  // object, e.g. because incremental marking marked an object as live that
+  // was later replaced.
+ if (it == movable_references_.end()) {
+ return;
+ }
+
+ // If the object is referenced by a slot that is contained on a compacted
+ // area itself, check whether it can be updated already.
+ MovableReference* slot = it->second;
+ auto interior_it = interior_movable_references_.find(slot);
+ if (interior_it != interior_movable_references_.end()) {
+ MovableReference* slot_location =
+ reinterpret_cast<MovableReference*>(interior_it->second);
+ if (!slot_location) {
+ interior_it->second = to;
+#if DEBUG
+ // Check that the containing object has not been moved yet.
+ auto reverse_it = interior_slot_to_object_.find(slot);
+ DCHECK_NE(interior_slot_to_object_.end(), reverse_it);
+ DCHECK_EQ(moved_objects_.end(), moved_objects_.find(reverse_it->second));
+#endif // DEBUG
+ } else {
+ slot = slot_location;
+ }
+ }
+
+  // Compaction is atomic, so the slot should not be updated during
+  // compaction.
+ DCHECK_EQ(from, *slot);
+
+  // Update the slot with its new value.
+ *slot = to;
+}
+
+void MovableReferences::RelocateInteriorReferences(Address from, Address to,
+ size_t size) {
+ // |from| is a valid address for a slot.
+ auto interior_it = interior_movable_references_.lower_bound(
+ reinterpret_cast<MovableReference*>(from));
+ if (interior_it == interior_movable_references_.end()) return;
+ DCHECK_GE(reinterpret_cast<Address>(interior_it->first), from);
+
+ size_t offset = reinterpret_cast<Address>(interior_it->first) - from;
+ while (offset < size) {
+ if (!interior_it->second) {
+ // Update the interior reference value, so that when the object the slot
+ // is pointing to is moved, it can re-use this value.
+      Address reference = to + offset;
+      interior_it->second = reference;
+
+ // If the |slot|'s content is pointing into the region [from, from +
+ // size) we are dealing with an interior pointer that does not point to
+ // a valid HeapObjectHeader. Such references need to be fixed up
+ // immediately.
+      Address& reference_contents = *reinterpret_cast<Address*>(reference);
+ if (reference_contents > from && reference_contents < (from + size)) {
+ reference_contents = reference_contents - from + to;
+ }
+ }
+
+ interior_it++;
+ if (interior_it == interior_movable_references_.end()) return;
+ offset = reinterpret_cast<Address>(interior_it->first) - from;
+ }
+}
+
+class CompactionState final {
+ CPPGC_STACK_ALLOCATED();
+ using Pages = std::vector<NormalPage*>;
+
+ public:
+ CompactionState(NormalPageSpace* space, MovableReferences& movable_references)
+ : space_(space), movable_references_(movable_references) {}
+
+ void AddPage(NormalPage* page) {
+ DCHECK_EQ(space_, page->space());
+ // If not the first page, add |page| onto the available pages chain.
+ if (!current_page_)
+ current_page_ = page;
+ else
+ available_pages_.push_back(page);
+ }
+
+ void RelocateObject(const NormalPage* page, const Address header,
+ size_t size) {
+ // Allocate and copy over the live object.
+ Address compact_frontier =
+ current_page_->PayloadStart() + used_bytes_in_current_page_;
+ if (compact_frontier + size > current_page_->PayloadEnd()) {
+ // Can't fit on current page. Add remaining onto the freelist and advance
+ // to next available page.
+ ReturnCurrentPageToSpace();
+
+ current_page_ = available_pages_.back();
+ available_pages_.pop_back();
+ used_bytes_in_current_page_ = 0;
+ compact_frontier = current_page_->PayloadStart();
+ }
+ if (V8_LIKELY(compact_frontier != header)) {
+ // Use a non-overlapping copy, if possible.
+ if (current_page_ == page)
+ memmove(compact_frontier, header, size);
+ else
+ memcpy(compact_frontier, header, size);
+ movable_references_.Relocate(header + sizeof(HeapObjectHeader),
+ compact_frontier + sizeof(HeapObjectHeader));
+ }
+ current_page_->object_start_bitmap().SetBit(compact_frontier);
+ used_bytes_in_current_page_ += size;
+ DCHECK_LE(used_bytes_in_current_page_, current_page_->PayloadSize());
+ }
+
+ void FinishCompactingSpace() {
+ // If the current page hasn't been allocated into, add it to the available
+ // list, for subsequent release below.
+ if (used_bytes_in_current_page_ == 0) {
+ available_pages_.push_back(current_page_);
+ } else {
+ ReturnCurrentPageToSpace();
+ }
+
+ // Return remaining available pages to the free page pool, decommitting
+ // them from the pagefile.
+ for (NormalPage* page : available_pages_) {
+ SET_MEMORY_INACCESSIBLE(page->PayloadStart(), page->PayloadSize());
+ NormalPage::Destroy(page);
+ }
+ }
+
+ void FinishCompactingPage(NormalPage* page) {
+#if DEBUG || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
+ defined(MEMORY_SANITIZER)
+ // Zap the unused portion, until it is either compacted into or freed.
+ if (current_page_ != page) {
+ ZapMemory(page->PayloadStart(), page->PayloadSize());
+ } else {
+ ZapMemory(page->PayloadStart() + used_bytes_in_current_page_,
+ page->PayloadSize() - used_bytes_in_current_page_);
+ }
+#endif
+ }
+
+ private:
+ void ReturnCurrentPageToSpace() {
+ DCHECK_EQ(space_, current_page_->space());
+ space_->AddPage(current_page_);
+ if (used_bytes_in_current_page_ != current_page_->PayloadSize()) {
+ // Put the remainder of the page onto the free list.
+ size_t freed_size =
+ current_page_->PayloadSize() - used_bytes_in_current_page_;
+ Address payload = current_page_->PayloadStart();
+ Address free_start = payload + used_bytes_in_current_page_;
+ SET_MEMORY_INACCESSIBLE(free_start, freed_size);
+ space_->free_list().Add({free_start, freed_size});
+ current_page_->object_start_bitmap().SetBit(free_start);
+ }
+ }
+
+ NormalPageSpace* space_;
+ MovableReferences& movable_references_;
+  // Page into which compacted objects will be written.
+ NormalPage* current_page_ = nullptr;
+ // Offset into |current_page_| to the next free address.
+ size_t used_bytes_in_current_page_ = 0;
+ // Additional pages in the current space that can be used as compaction
+  // targets. Pages that remain available when compaction finishes can be
+  // released.
+ Pages available_pages_;
+};
+
+void CompactPage(NormalPage* page, CompactionState& compaction_state) {
+ compaction_state.AddPage(page);
+
+ page->object_start_bitmap().Clear();
+
+ for (Address header_address = page->PayloadStart();
+ header_address < page->PayloadEnd();) {
+ HeapObjectHeader* header =
+ reinterpret_cast<HeapObjectHeader*>(header_address);
+ size_t size = header->GetSize();
+ DCHECK_GT(size, 0u);
+ DCHECK_LT(size, kPageSize);
+
+ if (header->IsFree()) {
+ // Unpoison the freelist entry so that we can compact into it as wanted.
+ ASAN_UNPOISON_MEMORY_REGION(header_address, size);
+ header_address += size;
+ continue;
+ }
+
+ if (!header->IsMarked()) {
+ // Compaction is currently launched only from AtomicPhaseEpilogue, so it's
+ // guaranteed to be on the mutator thread - no need to postpone
+ // finalization.
+ header->Finalize();
+
+ // As compaction is under way, leave the freed memory accessible
+ // while compacting the rest of the page. We just zap the payload
+ // to catch out other finalizers trying to access it.
+#if DEBUG || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
+ defined(MEMORY_SANITIZER)
+ ZapMemory(header, size);
+#endif
+ header_address += size;
+ continue;
+ }
+
+ // Object is marked.
+#if !defined(CPPGC_YOUNG_GENERATION)
+ header->Unmark();
+#endif
+ compaction_state.RelocateObject(page, header_address, size);
+ header_address += size;
+ }
+
+ compaction_state.FinishCompactingPage(page);
+}
+
+void CompactSpace(NormalPageSpace* space,
+ MovableReferences& movable_references) {
+ using Pages = NormalPageSpace::Pages;
+
+ DCHECK(space->is_compactable());
+
+ space->free_list().Clear();
+
+ // Compaction generally follows Jonker's algorithm for fast garbage
+ // compaction. Compaction is performed in-place, sliding objects down over
+ // unused holes for a smaller heap page footprint and improved locality. A
+ // "compaction pointer" is consequently kept, pointing to the next available
+ // address to move objects down to. It will belong to one of the already
+ // compacted pages for this space, but as compaction proceeds, it will not
+ // belong to the same page as the one being currently compacted.
+ //
+ // The compaction pointer is represented by the
+ // |(current_page_, used_bytes_in_current_page_)| pair, with
+ // |used_bytes_in_current_page_| being the offset into |current_page_|, making
+ // up the next available location. When the compaction of an arena page causes
+ // the compaction pointer to exhaust the current page it is compacting into,
+ // page compaction will advance the current page of the compaction
+ // pointer, as well as the allocation point.
+ //
+ // By construction, the page compaction can be performed without having
+ // to allocate any new pages. So to arrange for the page compaction's
+ // supply of freed, available pages, we chain them together after each
+ // has been "compacted from". The page compaction will then reuse those
+ // as needed, and once finished, the chained, available pages can be
+ // released back to the OS.
+ //
+ // To ease the passing of the compaction state when iterating over an
+ // arena's pages, package it up into a |CompactionState|.
+
+ Pages pages = space->RemoveAllPages();
+ if (pages.empty()) return;
+
+ CompactionState compaction_state(space, movable_references);
+ for (BasePage* page : pages) {
+ // Large objects do not belong to this arena.
+ CompactPage(NormalPage::From(page), compaction_state);
+ }
+
+ compaction_state.FinishCompactingSpace();
+  // Sweeping will verify the object start bitmap of the compacted space.
+}
+
+size_t UpdateHeapResidency(const std::vector<NormalPageSpace*>& spaces) {
+ return std::accumulate(spaces.cbegin(), spaces.cend(), 0u,
+ [](size_t acc, const NormalPageSpace* space) {
+ DCHECK(space->is_compactable());
+ if (!space->size()) return acc;
+ return acc + space->free_list().Size();
+ });
+}
+
+} // namespace
+
+Compactor::Compactor(RawHeap& heap) : heap_(heap) {
+ for (auto& space : heap_) {
+ if (!space->is_compactable()) continue;
+ DCHECK_EQ(&heap, space->raw_heap());
+ compactable_spaces_.push_back(static_cast<NormalPageSpace*>(space.get()));
+ }
+}
+
+bool Compactor::ShouldCompact(
+ GarbageCollector::Config::MarkingType marking_type,
+ GarbageCollector::Config::StackState stack_state) {
+ if (compactable_spaces_.empty() ||
+ (marking_type == GarbageCollector::Config::MarkingType::kAtomic &&
+ stack_state ==
+ GarbageCollector::Config::StackState::kMayContainHeapPointers)) {
+ // The following check ensures that tests that want to test compaction are
+ // not interrupted by garbage collections that cannot use compaction.
+ DCHECK(!enable_for_next_gc_for_testing_);
+ return false;
+ }
+
+ if (enable_for_next_gc_for_testing_) {
+ return true;
+ }
+
+ size_t free_list_size = UpdateHeapResidency(compactable_spaces_);
+
+ return free_list_size > kFreeListSizeThreshold;
+}
+
+void Compactor::InitializeIfShouldCompact(
+ GarbageCollector::Config::MarkingType marking_type,
+ GarbageCollector::Config::StackState stack_state) {
+ DCHECK(!is_enabled_);
+
+ if (!ShouldCompact(marking_type, stack_state)) return;
+
+ compaction_worklists_ = std::make_unique<CompactionWorklists>();
+
+ is_enabled_ = true;
+ enable_for_next_gc_for_testing_ = false;
+}
+
+bool Compactor::CancelIfShouldNotCompact(
+ GarbageCollector::Config::MarkingType marking_type,
+ GarbageCollector::Config::StackState stack_state) {
+ if (!is_enabled_ || ShouldCompact(marking_type, stack_state)) return false;
+
+ DCHECK_NOT_NULL(compaction_worklists_);
+ compaction_worklists_->movable_slots_worklist()->Clear();
+ compaction_worklists_.reset();
+
+ is_enabled_ = false;
+ return true;
+}
+
+Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
+ if (!is_enabled_) return CompactableSpaceHandling::kSweep;
+
+ MovableReferences movable_references(*heap_.heap());
+
+ CompactionWorklists::MovableReferencesWorklist::Local local(
+ compaction_worklists_->movable_slots_worklist());
+ CompactionWorklists::MovableReference* slot;
+ while (local.Pop(&slot)) {
+ movable_references.AddOrFilter(slot);
+ }
+ compaction_worklists_.reset();
+
+ for (NormalPageSpace* space : compactable_spaces_) {
+ CompactSpace(space, movable_references);
+ }
+
+ is_enabled_ = false;
+ return CompactableSpaceHandling::kIgnore;
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/compactor.h b/deps/v8/src/heap/cppgc/compactor.h
new file mode 100644
index 0000000000..d354274a33
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/compactor.h
@@ -0,0 +1,56 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_COMPACTOR_H_
+#define V8_HEAP_CPPGC_COMPACTOR_H_
+
+#include "src/heap/cppgc/compaction-worklists.h"
+#include "src/heap/cppgc/garbage-collector.h"
+#include "src/heap/cppgc/raw-heap.h"
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT_PRIVATE Compactor final {
+ using CompactableSpaceHandling =
+ Sweeper::SweepingConfig::CompactableSpaceHandling;
+
+ public:
+ explicit Compactor(RawHeap&);
+ ~Compactor() { DCHECK(!is_enabled_); }
+
+ void InitializeIfShouldCompact(GarbageCollector::Config::MarkingType,
+ GarbageCollector::Config::StackState);
+  // Returns true if compaction was cancelled.
+ bool CancelIfShouldNotCompact(GarbageCollector::Config::MarkingType,
+ GarbageCollector::Config::StackState);
+ CompactableSpaceHandling CompactSpacesIfEnabled();
+
+ CompactionWorklists* compaction_worklists() {
+ return compaction_worklists_.get();
+ }
+
+ void EnableForNextGCForTesting() { enable_for_next_gc_for_testing_ = true; }
+
+ bool IsEnabledForTesting() const { return is_enabled_; }
+
+ private:
+ bool ShouldCompact(GarbageCollector::Config::MarkingType,
+ GarbageCollector::Config::StackState);
+
+ RawHeap& heap_;
+ // Compactor does not own the compactable spaces. The heap owns all spaces.
+ std::vector<NormalPageSpace*> compactable_spaces_;
+
+ std::unique_ptr<CompactionWorklists> compaction_worklists_;
+
+ bool is_enabled_ = false;
+
+ bool enable_for_next_gc_for_testing_ = false;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_COMPACTOR_H_
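Usage sketch (illustrative, not part of the patch): compactor.h implies a
per-GC call order in which compaction is decided up front, movable slots are
recorded during marking, and relocation happens right before sweeping. The
driver function below and the Config fields it reads are assumptions used only
to show that order.

    // Hypothetical GC driver wiring, showing the order implied by the API.
    void CompactionCycleSketch(cppgc::internal::Compactor& compactor,
                               cppgc::internal::GarbageCollector::Config config) {
      // 1. Decide before marking; this also creates the CompactionWorklists
      //    that marking visitors fill via HandleMovableReference().
      compactor.InitializeIfShouldCompact(config.marking_type,
                                          config.stack_state);

      // ... marking runs here and records movable slots ...

      // 2. The atomic pause may still cancel compaction, e.g. if the stack
      //    turns out to contain heap pointers.
      compactor.CancelIfShouldNotCompact(config.marking_type,
                                         config.stack_state);

      // 3. Move objects and fix up slots before sweeping. The returned
      //    CompactableSpaceHandling tells the sweeper whether compacted
      //    spaces still need sweeping (kSweep) or can be skipped (kIgnore).
      compactor.CompactSpacesIfEnabled();
    }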
diff --git a/deps/v8/src/heap/cppgc/concurrent-marker.cc b/deps/v8/src/heap/cppgc/concurrent-marker.cc
new file mode 100644
index 0000000000..5df422fa5c
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/concurrent-marker.cc
@@ -0,0 +1,246 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/concurrent-marker.h"
+
+#include "include/cppgc/platform.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/liveness-broker.h"
+#include "src/heap/cppgc/marking-state.h"
+#include "src/heap/cppgc/marking-visitor.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+static constexpr double kMarkingScheduleRatioBeforeConcurrentPriorityIncrease =
+ 0.5;
+
+static constexpr size_t kDefaultDeadlineCheckInterval = 750u;
+
+template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
+ typename WorklistLocal, typename Callback>
+bool DrainWorklistWithYielding(
+ JobDelegate* job_delegate, ConcurrentMarkingState& marking_state,
+ IncrementalMarkingSchedule& incremental_marking_schedule,
+ WorklistLocal& worklist_local, Callback callback) {
+ return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
+ [&incremental_marking_schedule, &marking_state, job_delegate]() {
+ incremental_marking_schedule.AddConcurrentlyMarkedBytes(
+ marking_state.RecentlyMarkedBytes());
+ return job_delegate->ShouldYield();
+ },
+ worklist_local, callback);
+}
+
+size_t WorkSizeForConcurrentMarking(MarkingWorklists& marking_worklists) {
+ return marking_worklists.marking_worklist()->Size() +
+ marking_worklists.write_barrier_worklist()->Size() +
+ marking_worklists.previously_not_fully_constructed_worklist()->Size();
+}
+
+// Checks whether worklists' global pools hold any segment a concurrent marker
+// can steal. This is called before the concurrent marker holds any Locals, so
+// no need to check local segments.
+bool HasWorkForConcurrentMarking(MarkingWorklists& marking_worklists) {
+ return !marking_worklists.marking_worklist()->IsEmpty() ||
+ !marking_worklists.write_barrier_worklist()->IsEmpty() ||
+ !marking_worklists.previously_not_fully_constructed_worklist()
+ ->IsEmpty();
+}
+
+class ConcurrentMarkingTask final : public v8::JobTask {
+ public:
+ explicit ConcurrentMarkingTask(ConcurrentMarkerBase&);
+
+ void Run(JobDelegate* delegate) final;
+
+ size_t GetMaxConcurrency(size_t) const final;
+
+ private:
+ void ProcessWorklists(JobDelegate*, ConcurrentMarkingState&, Visitor&);
+
+ const ConcurrentMarkerBase& concurrent_marker_;
+};
+
+ConcurrentMarkingTask::ConcurrentMarkingTask(
+ ConcurrentMarkerBase& concurrent_marker)
+ : concurrent_marker_(concurrent_marker) {}
+
+void ConcurrentMarkingTask::Run(JobDelegate* job_delegate) {
+ if (!HasWorkForConcurrentMarking(concurrent_marker_.marking_worklists()))
+ return;
+ ConcurrentMarkingState concurrent_marking_state(
+ concurrent_marker_.heap(), concurrent_marker_.marking_worklists(),
+ concurrent_marker_.heap().compactor().compaction_worklists());
+ std::unique_ptr<Visitor> concurrent_marking_visitor =
+ concurrent_marker_.CreateConcurrentMarkingVisitor(
+ concurrent_marking_state);
+ ProcessWorklists(job_delegate, concurrent_marking_state,
+ *concurrent_marking_visitor.get());
+ concurrent_marker_.incremental_marking_schedule().AddConcurrentlyMarkedBytes(
+ concurrent_marking_state.RecentlyMarkedBytes());
+ concurrent_marking_state.Publish();
+}
+
+size_t ConcurrentMarkingTask::GetMaxConcurrency(
+ size_t current_worker_count) const {
+ return WorkSizeForConcurrentMarking(concurrent_marker_.marking_worklists()) +
+ current_worker_count;
+}
+
+void ConcurrentMarkingTask::ProcessWorklists(
+ JobDelegate* job_delegate, ConcurrentMarkingState& concurrent_marking_state,
+ Visitor& concurrent_marking_visitor) {
+ do {
+ if (!DrainWorklistWithYielding(
+ job_delegate, concurrent_marking_state,
+ concurrent_marker_.incremental_marking_schedule(),
+ concurrent_marking_state
+ .previously_not_fully_constructed_worklist(),
+ [&concurrent_marking_state,
+ &concurrent_marking_visitor](HeapObjectHeader* header) {
+ BasePage::FromPayload(header)->SynchronizedLoad();
+ concurrent_marking_state.AccountMarkedBytes(*header);
+ DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
+ concurrent_marking_visitor, *header);
+ })) {
+ return;
+ }
+
+ if (!DrainWorklistWithYielding(
+ job_delegate, concurrent_marking_state,
+ concurrent_marker_.incremental_marking_schedule(),
+ concurrent_marking_state.marking_worklist(),
+ [&concurrent_marking_state, &concurrent_marking_visitor](
+ const MarkingWorklists::MarkingItem& item) {
+ BasePage::FromPayload(item.base_object_payload)
+ ->SynchronizedLoad();
+ const HeapObjectHeader& header =
+ HeapObjectHeader::FromPayload(item.base_object_payload);
+ DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
+ DCHECK(header.IsMarked<AccessMode::kAtomic>());
+ concurrent_marking_state.AccountMarkedBytes(header);
+ item.callback(&concurrent_marking_visitor,
+ item.base_object_payload);
+ })) {
+ return;
+ }
+
+ if (!DrainWorklistWithYielding(
+ job_delegate, concurrent_marking_state,
+ concurrent_marker_.incremental_marking_schedule(),
+ concurrent_marking_state.write_barrier_worklist(),
+ [&concurrent_marking_state,
+ &concurrent_marking_visitor](HeapObjectHeader* header) {
+ BasePage::FromPayload(header)->SynchronizedLoad();
+ concurrent_marking_state.AccountMarkedBytes(*header);
+ DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
+ concurrent_marking_visitor, *header);
+ })) {
+ return;
+ }
+
+ if (!DrainWorklistWithYielding(
+ job_delegate, concurrent_marking_state,
+ concurrent_marker_.incremental_marking_schedule(),
+ concurrent_marking_state.ephemeron_pairs_for_processing_worklist(),
+ [&concurrent_marking_state](
+ const MarkingWorklists::EphemeronPairItem& item) {
+ concurrent_marking_state.ProcessEphemeron(item.key,
+ item.value_desc);
+ })) {
+ return;
+ }
+ } while (
+ !concurrent_marking_state.marking_worklist().IsLocalAndGlobalEmpty());
+}
+
+} // namespace
+
+ConcurrentMarkerBase::ConcurrentMarkerBase(
+ HeapBase& heap, MarkingWorklists& marking_worklists,
+ IncrementalMarkingSchedule& incremental_marking_schedule,
+ cppgc::Platform* platform)
+ : heap_(heap),
+ marking_worklists_(marking_worklists),
+ incremental_marking_schedule_(incremental_marking_schedule),
+ platform_(platform) {}
+
+void ConcurrentMarkerBase::Start() {
+ DCHECK(platform_);
+ concurrent_marking_handle_ =
+ platform_->PostJob(v8::TaskPriority::kUserVisible,
+ std::make_unique<ConcurrentMarkingTask>(*this));
+}
+
+void ConcurrentMarkerBase::Cancel() {
+ if (concurrent_marking_handle_ && concurrent_marking_handle_->IsValid())
+ concurrent_marking_handle_->Cancel();
+}
+
+void ConcurrentMarkerBase::JoinForTesting() {
+ if (concurrent_marking_handle_ && concurrent_marking_handle_->IsValid())
+ concurrent_marking_handle_->Join();
+}
+
+bool ConcurrentMarkerBase::IsActive() const {
+ return concurrent_marking_handle_ && concurrent_marking_handle_->IsRunning();
+}
+
+ConcurrentMarkerBase::~ConcurrentMarkerBase() {
+ CHECK_IMPLIES(concurrent_marking_handle_,
+ !concurrent_marking_handle_->IsValid());
+}
+
+bool ConcurrentMarkerBase::NotifyIncrementalMutatorStepCompleted() {
+ DCHECK(concurrent_marking_handle_);
+ if (HasWorkForConcurrentMarking(marking_worklists_)) {
+ // Notifies the scheduler that max concurrency might have increased.
+ // This will adjust the number of markers if necessary.
+ IncreaseMarkingPriorityIfNeeded();
+ concurrent_marking_handle_->NotifyConcurrencyIncrease();
+ return false;
+ }
+ return !concurrent_marking_handle_->IsActive();
+}
+
+void ConcurrentMarkerBase::IncreaseMarkingPriorityIfNeeded() {
+ if (!concurrent_marking_handle_->UpdatePriorityEnabled()) return;
+ if (concurrent_marking_priority_increased_) return;
+  // If concurrent tasks aren't executed, they might delay GC finalization.
+  // As long as the GC is active, so is the write barrier, which incurs a
+  // performance cost. Marking is estimated to take
+  // |IncrementalMarkingSchedule::kEstimatedMarkingTimeMs| overall. If
+  // concurrent marking tasks have not reported any progress (i.e. the
+  // concurrently marked bytes count has not changed) in over
+  // |kMarkingScheduleRatioBeforeConcurrentPriorityIncrease| of
+  // that expected duration, we increase the concurrent task priority
+  // for the duration of the current GC. This is meant to prevent the
+  // GC from exceeding its expected end time.
+ size_t current_concurrently_marked_bytes_ =
+ incremental_marking_schedule_.GetConcurrentlyMarkedBytes();
+ if (current_concurrently_marked_bytes_ > last_concurrently_marked_bytes_) {
+ last_concurrently_marked_bytes_ = current_concurrently_marked_bytes_;
+ last_concurrently_marked_bytes_update_ = v8::base::TimeTicks::Now();
+ } else if ((v8::base::TimeTicks::Now() -
+ last_concurrently_marked_bytes_update_)
+ .InMilliseconds() >
+ kMarkingScheduleRatioBeforeConcurrentPriorityIncrease *
+ IncrementalMarkingSchedule::kEstimatedMarkingTimeMs) {
+ concurrent_marking_handle_->UpdatePriority(
+ cppgc::TaskPriority::kUserBlocking);
+ concurrent_marking_priority_increased_ = true;
+ }
+}
+
+std::unique_ptr<Visitor> ConcurrentMarker::CreateConcurrentMarkingVisitor(
+ ConcurrentMarkingState& marking_state) const {
+ return std::make_unique<ConcurrentMarkingVisitor>(heap(), marking_state);
+}
+
+} // namespace internal
+} // namespace cppgc
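Worked example for the priority-increase heuristic above (illustrative numbers
only; the real constant lives in IncrementalMarkingSchedule): if
kEstimatedMarkingTimeMs were 500 ms, then with
kMarkingScheduleRatioBeforeConcurrentPriorityIncrease = 0.5 the concurrent task
priority is raised to kUserBlocking once no concurrent progress has been
reported for 0.5 * 500 ms = 250 ms.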
diff --git a/deps/v8/src/heap/cppgc/concurrent-marker.h b/deps/v8/src/heap/cppgc/concurrent-marker.h
new file mode 100644
index 0000000000..4f0ec849d1
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/concurrent-marker.h
@@ -0,0 +1,76 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_CONCURRENT_MARKER_H_
+#define V8_HEAP_CPPGC_CONCURRENT_MARKER_H_
+
+#include "include/cppgc/platform.h"
+#include "src/heap/cppgc/incremental-marking-schedule.h"
+#include "src/heap/cppgc/marking-state.h"
+#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/marking-worklists.h"
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT_PRIVATE ConcurrentMarkerBase {
+ public:
+ ConcurrentMarkerBase(HeapBase&, MarkingWorklists&,
+ IncrementalMarkingSchedule&, cppgc::Platform*);
+ virtual ~ConcurrentMarkerBase();
+
+ ConcurrentMarkerBase(const ConcurrentMarkerBase&) = delete;
+ ConcurrentMarkerBase& operator=(const ConcurrentMarkerBase&) = delete;
+
+ void Start();
+ void Cancel();
+
+ void JoinForTesting();
+
+ bool NotifyIncrementalMutatorStepCompleted();
+
+ bool IsActive() const;
+
+ HeapBase& heap() const { return heap_; }
+ MarkingWorklists& marking_worklists() const { return marking_worklists_; }
+ IncrementalMarkingSchedule& incremental_marking_schedule() const {
+ return incremental_marking_schedule_;
+ }
+
+ virtual std::unique_ptr<Visitor> CreateConcurrentMarkingVisitor(
+ ConcurrentMarkingState&) const = 0;
+
+ protected:
+ void IncreaseMarkingPriorityIfNeeded();
+
+ private:
+ HeapBase& heap_;
+ MarkingWorklists& marking_worklists_;
+ IncrementalMarkingSchedule& incremental_marking_schedule_;
+ cppgc::Platform* const platform_;
+
+  // The job handle doubles as a flag denoting whether concurrent marking was
+  // started.
+ std::unique_ptr<JobHandle> concurrent_marking_handle_{nullptr};
+
+ size_t last_concurrently_marked_bytes_ = 0;
+ v8::base::TimeTicks last_concurrently_marked_bytes_update_;
+ bool concurrent_marking_priority_increased_{false};
+};
+
+class V8_EXPORT_PRIVATE ConcurrentMarker : public ConcurrentMarkerBase {
+ public:
+ ConcurrentMarker(HeapBase& heap, MarkingWorklists& marking_worklists,
+ IncrementalMarkingSchedule& incremental_marking_schedule,
+ cppgc::Platform* platform)
+ : ConcurrentMarkerBase(heap, marking_worklists,
+ incremental_marking_schedule, platform) {}
+
+ std::unique_ptr<Visitor> CreateConcurrentMarkingVisitor(
+ ConcurrentMarkingState&) const final;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_CONCURRENT_MARKER_H_
diff --git a/deps/v8/src/heap/cppgc/default-job.h b/deps/v8/src/heap/cppgc/default-job.h
deleted file mode 100644
index 9ef6f3fb58..0000000000
--- a/deps/v8/src/heap/cppgc/default-job.h
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_CPPGC_DEFAULT_JOB_H_
-#define V8_HEAP_CPPGC_DEFAULT_JOB_H_
-
-#include <atomic>
-#include <map>
-#include <memory>
-#include <unordered_set>
-#include <vector>
-
-#include "include/cppgc/platform.h"
-#include "src/base/logging.h"
-#include "src/base/platform/mutex.h"
-
-namespace cppgc {
-namespace internal {
-
-template <typename Job>
-class DefaultJobFactory {
- public:
- static std::shared_ptr<Job> Create(std::unique_ptr<cppgc::JobTask> job_task) {
- std::shared_ptr<Job> job =
- std::make_shared<Job>(typename Job::Key(), std::move(job_task));
- job->NotifyConcurrencyIncrease();
- return job;
- }
-};
-
-template <typename Thread>
-class DefaultJobImpl {
- public:
- class JobDelegate;
- class JobHandle;
-
- class Key {
- private:
- Key() {}
-
- template <typename Job>
- friend class DefaultJobFactory;
- };
-
- DefaultJobImpl(Key, std::unique_ptr<cppgc::JobTask> job_task)
- : job_task_(std::move(job_task)) {}
-
- ~DefaultJobImpl() {
- Cancel();
- DCHECK_EQ(0, active_threads_.load(std::memory_order_relaxed));
- }
-
- void NotifyConcurrencyIncrease();
-
- void Join() {
- for (std::shared_ptr<Thread>& thread : job_threads_) thread->Join();
- job_threads_.clear();
- can_run_.store(false, std::memory_order_relaxed);
- }
-
- void Cancel() {
- can_run_.store(false, std::memory_order_relaxed);
- Join();
- }
-
- bool IsCompleted() const { return !IsRunning(); }
- bool IsRunning() const {
- uint8_t active_threads = active_threads_.load(std::memory_order_relaxed);
- return (active_threads + job_task_->GetMaxConcurrency(active_threads)) > 0;
- }
-
- bool CanRun() const { return can_run_.load(std::memory_order_relaxed); }
-
- void RunJobTask() {
- DCHECK_NOT_NULL(job_task_);
- NotifyJobThreadStart();
- JobDelegate delegate(this);
- job_task_->Run(&delegate);
- NotifyJobThreadEnd();
- }
-
- protected:
- virtual std::shared_ptr<Thread> CreateThread(DefaultJobImpl*) = 0;
-
- void NotifyJobThreadStart() {
- active_threads_.fetch_add(1, std::memory_order_relaxed);
- }
- void NotifyJobThreadEnd() {
- active_threads_.fetch_sub(1, std::memory_order_relaxed);
- }
-
- void GuaranteeAvailableIds(uint8_t max_threads) {
- if (max_threads <= highest_thread_count_) return;
- v8::base::MutexGuard guard(&ids_lock_);
- while (highest_thread_count_ < max_threads) {
- available_ids_.push_back(++highest_thread_count_);
- }
- }
-
- std::unique_ptr<cppgc::JobTask> job_task_;
- std::vector<std::shared_ptr<Thread>> job_threads_;
- std::atomic_bool can_run_{true};
- std::atomic<uint8_t> active_threads_{0};
-
- // Task id management.
- v8::base::Mutex ids_lock_;
- std::vector<uint8_t> available_ids_;
- uint8_t highest_thread_count_ = -1;
-};
-
-template <typename Thread>
-class DefaultJobImpl<Thread>::JobDelegate final : public cppgc::JobDelegate {
- public:
- explicit JobDelegate(DefaultJobImpl* job) : job_(job) {}
- ~JobDelegate() { ReleaseTaskId(); }
- bool ShouldYield() override { return !job_->CanRun(); }
- void NotifyConcurrencyIncrease() override {
- job_->NotifyConcurrencyIncrease();
- }
- uint8_t GetTaskId() override {
- AcquireTaskId();
- return job_thread_id_;
- }
-
- private:
- void AcquireTaskId() {
- if (job_thread_id_ != kInvalidTaskId) return;
- v8::base::MutexGuard guard(&job_->ids_lock_);
- job_thread_id_ = job_->available_ids_.back();
- DCHECK_NE(kInvalidTaskId, job_thread_id_);
- job_->available_ids_.pop_back();
- }
- void ReleaseTaskId() {
- if (job_thread_id_ == kInvalidTaskId) return;
- v8::base::MutexGuard guard(&job_->ids_lock_);
- job_->available_ids_.push_back(job_thread_id_);
- }
-
- DefaultJobImpl* const job_;
- static constexpr uint8_t kInvalidTaskId = std::numeric_limits<uint8_t>::max();
- uint8_t job_thread_id_ = kInvalidTaskId;
-};
-
-template <typename Thread>
-void DefaultJobImpl<Thread>::NotifyConcurrencyIncrease() {
- DCHECK(CanRun());
- static const size_t kMaxThreads = Thread::GetMaxSupportedConcurrency();
- uint8_t current_active_threads =
- active_threads_.load(std::memory_order_relaxed);
- size_t max_threads = std::min(
- kMaxThreads, job_task_->GetMaxConcurrency(current_active_threads));
- if (current_active_threads >= max_threads) return;
- DCHECK_LT(max_threads, std::numeric_limits<uint8_t>::max());
- GuaranteeAvailableIds(max_threads);
- for (uint8_t new_threads = max_threads - current_active_threads;
- new_threads > 0; --new_threads) {
- std::shared_ptr<Thread> thread = CreateThread(this);
- job_threads_.push_back(thread);
- }
-}
-
-template <typename Thread>
-class DefaultJobImpl<Thread>::JobHandle final : public cppgc::JobHandle {
- public:
- explicit JobHandle(std::shared_ptr<DefaultJobImpl> job)
- : job_(std::move(job)) {
- DCHECK_NOT_NULL(job_);
- }
-
- void NotifyConcurrencyIncrease() override {
- job_->NotifyConcurrencyIncrease();
- }
- void Join() override { job_->Join(); }
- void Cancel() override { job_->Cancel(); }
- bool IsCompleted() override { return job_->IsCompleted(); }
- bool IsRunning() override { return job_->IsRunning(); }
-
- private:
- std::shared_ptr<DefaultJobImpl> job_;
-};
-
-} // namespace internal
-} // namespace cppgc
-
-#endif // V8_HEAP_CPPGC_DEFAULT_JOB_H_
diff --git a/deps/v8/src/heap/cppgc/default-platform.cc b/deps/v8/src/heap/cppgc/default-platform.cc
deleted file mode 100644
index 0ac5440f7e..0000000000
--- a/deps/v8/src/heap/cppgc/default-platform.cc
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "include/cppgc/default-platform.h"
-
-#include <chrono> // NOLINT(build/c++11)
-#include <thread> // NOLINT(build/c++11)
-
-#include "src/base/logging.h"
-#include "src/base/page-allocator.h"
-#include "src/base/sys-info.h"
-#include "src/heap/cppgc/default-job.h"
-
-namespace cppgc {
-
-namespace internal {
-
-// Default implementation of Jobs based on std::thread.
-namespace {
-class DefaultJobThread final : private std::thread {
- public:
- template <typename Function>
- explicit DefaultJobThread(Function function)
- : std::thread(std::move(function)) {}
- ~DefaultJobThread() { DCHECK(!joinable()); }
-
- void Join() { join(); }
-
- static size_t GetMaxSupportedConcurrency() {
- return v8::base::SysInfo::NumberOfProcessors() - 1;
- }
-};
-} // namespace
-
-class DefaultJob final : public DefaultJobImpl<DefaultJobThread> {
- public:
- DefaultJob(Key key, std::unique_ptr<cppgc::JobTask> job_task)
- : DefaultJobImpl(key, std::move(job_task)) {}
-
- std::shared_ptr<DefaultJobThread> CreateThread(DefaultJobImpl* job) final {
- return std::make_shared<DefaultJobThread>([job = this] {
- DCHECK_NOT_NULL(job);
- job->RunJobTask();
- });
- }
-};
-
-} // namespace internal
-
-void DefaultTaskRunner::PostTask(std::unique_ptr<cppgc::Task> task) {
- tasks_.push_back(std::move(task));
-}
-
-void DefaultTaskRunner::PostDelayedTask(std::unique_ptr<cppgc::Task> task,
- double) {
- PostTask(std::move(task));
-}
-
-void DefaultTaskRunner::PostNonNestableTask(std::unique_ptr<cppgc::Task>) {
- UNREACHABLE();
-}
-
-void DefaultTaskRunner::PostNonNestableDelayedTask(std::unique_ptr<cppgc::Task>,
- double) {
- UNREACHABLE();
-}
-
-void DefaultTaskRunner::PostIdleTask(std::unique_ptr<cppgc::IdleTask> task) {
- idle_tasks_.push_back(std::move(task));
-}
-
-bool DefaultTaskRunner::RunSingleTask() {
- if (!tasks_.size()) return false;
-
- tasks_.back()->Run();
- tasks_.pop_back();
-
- return true;
-}
-
-bool DefaultTaskRunner::RunSingleIdleTask(double deadline_in_seconds) {
- if (!idle_tasks_.size()) return false;
-
- idle_tasks_.back()->Run(deadline_in_seconds);
- idle_tasks_.pop_back();
-
- return true;
-}
-
-void DefaultTaskRunner::RunUntilIdle() {
- for (auto& task : tasks_) {
- task->Run();
- }
- tasks_.clear();
-
- for (auto& task : idle_tasks_) {
- task->Run(std::numeric_limits<double>::infinity());
- }
- idle_tasks_.clear();
-}
-
-DefaultPlatform::DefaultPlatform()
- : page_allocator_(std::make_unique<v8::base::PageAllocator>()),
- foreground_task_runner_(std::make_shared<DefaultTaskRunner>()) {}
-
-DefaultPlatform::~DefaultPlatform() noexcept { WaitAllBackgroundTasks(); }
-
-cppgc::PageAllocator* DefaultPlatform::GetPageAllocator() {
- return page_allocator_.get();
-}
-
-double DefaultPlatform::MonotonicallyIncreasingTime() {
- return std::chrono::duration<double>(
- std::chrono::high_resolution_clock::now().time_since_epoch())
- .count();
-}
-
-std::shared_ptr<cppgc::TaskRunner> DefaultPlatform::GetForegroundTaskRunner() {
- return foreground_task_runner_;
-}
-
-std::unique_ptr<cppgc::JobHandle> DefaultPlatform::PostJob(
- cppgc::TaskPriority priority, std::unique_ptr<cppgc::JobTask> job_task) {
- std::shared_ptr<internal::DefaultJob> job =
- internal::DefaultJobFactory<internal::DefaultJob>::Create(
- std::move(job_task));
- jobs_.push_back(job);
- return std::make_unique<internal::DefaultJob::JobHandle>(std::move(job));
-}
-
-void DefaultPlatform::WaitAllForegroundTasks() {
- foreground_task_runner_->RunUntilIdle();
-}
-
-void DefaultPlatform::WaitAllBackgroundTasks() {
- for (auto& job : jobs_) {
- job->Join();
- }
- jobs_.clear();
-}
-
-} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/garbage-collector.h b/deps/v8/src/heap/cppgc/garbage-collector.h
index 1fc7ed925d..e5f6641bdf 100644
--- a/deps/v8/src/heap/cppgc/garbage-collector.h
+++ b/deps/v8/src/heap/cppgc/garbage-collector.h
@@ -19,7 +19,7 @@ class GarbageCollector {
using CollectionType = Marker::MarkingConfig::CollectionType;
using StackState = cppgc::Heap::StackState;
using MarkingType = Marker::MarkingConfig::MarkingType;
- using SweepingType = Sweeper::Config;
+ using SweepingType = Sweeper::SweepingConfig::SweepingType;
static constexpr Config ConservativeAtomicConfig() {
return {CollectionType::kMajor, StackState::kMayContainHeapPointers,
diff --git a/deps/v8/src/heap/cppgc/gc-info-table.h b/deps/v8/src/heap/cppgc/gc-info-table.h
index 749f30b258..c8ed97ad7d 100644
--- a/deps/v8/src/heap/cppgc/gc-info-table.h
+++ b/deps/v8/src/heap/cppgc/gc-info-table.h
@@ -23,9 +23,8 @@ namespace internal {
struct GCInfo final {
FinalizationCallback finalize;
TraceCallback trace;
+ NameCallback name;
bool has_v_table;
- // Keep sizeof(GCInfo) a power of 2.
- size_t padding = 0;
};
class V8_EXPORT GCInfoTable final {
diff --git a/deps/v8/src/heap/cppgc/gc-info.cc b/deps/v8/src/heap/cppgc/gc-info.cc
index 70970139b1..57d49fb322 100644
--- a/deps/v8/src/heap/cppgc/gc-info.cc
+++ b/deps/v8/src/heap/cppgc/gc-info.cc
@@ -11,9 +11,10 @@ namespace internal {
RegisteredGCInfoIndex::RegisteredGCInfoIndex(
FinalizationCallback finalization_callback, TraceCallback trace_callback,
- bool has_v_table)
+ NameCallback name_callback, bool has_v_table)
: index_(GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
- {finalization_callback, trace_callback, has_v_table})) {}
+ {finalization_callback, trace_callback, name_callback,
+ has_v_table})) {}
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/globals.h b/deps/v8/src/heap/cppgc/globals.h
index d286a7fa42..747b194fea 100644
--- a/deps/v8/src/heap/cppgc/globals.h
+++ b/deps/v8/src/heap/cppgc/globals.h
@@ -20,6 +20,9 @@ constexpr size_t kKB = 1024;
constexpr size_t kMB = kKB * 1024;
constexpr size_t kGB = kMB * 1024;
+// AccessMode used for choosing between atomic and non-atomic accesses.
+enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
+
// See 6.7.6 (http://eel.is/c++draft/basic.align) for alignment restrictions. We
// do not fully support all alignment restrictions (following
// alignof(std::max_align_t)) but limit to alignof(double).
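
The new AccessMode enum is what the heap-object-header and heap-page changes later in this patch template over to pick atomic or plain field accesses. A minimal standalone sketch of that dispatch pattern (illustrative C++ only, not cppgc's implementation):

#include <atomic>
#include <cstdint>

enum class AccessMode : uint8_t { kNonAtomic, kAtomic };

// Illustrative 16-bit field that a concurrent reader may also touch.
class EncodedField {
 public:
  template <AccessMode mode = AccessMode::kNonAtomic>
  uint16_t Load() const {
    if constexpr (mode == AccessMode::kAtomic) {
      // Shared paths (e.g. a marking thread) take the atomic route.
      return reinterpret_cast<const std::atomic<uint16_t>*>(&value_)->load(
          std::memory_order_acquire);
    } else {
      return value_;  // mutator-only fast path
    }
  }

 private:
  uint16_t value_ = 0;
};

// Usage:
//   EncodedField f;
//   uint16_t a = f.Load();                       // non-atomic
//   uint16_t b = f.Load<AccessMode::kAtomic>();  // acquire load
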
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index 5a92c4f159..50edce4b4e 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -53,8 +53,10 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
} // namespace
-HeapBase::HeapBase(std::shared_ptr<cppgc::Platform> platform,
- size_t custom_spaces, StackSupport stack_support)
+HeapBase::HeapBase(
+ std::shared_ptr<cppgc::Platform> platform,
+ const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
+ StackSupport stack_support)
: raw_heap_(this, custom_spaces),
platform_(std::move(platform)),
#if defined(CPPGC_CAGED_HEAP)
@@ -68,6 +70,7 @@ HeapBase::HeapBase(std::shared_ptr<cppgc::Platform> platform,
stack_(std::make_unique<heap::base::Stack>(
v8::base::Stack::GetStackStart())),
prefinalizer_handler_(std::make_unique<PreFinalizerHandler>()),
+ compactor_(raw_heap_),
object_allocator_(&raw_heap_, page_backend_.get(),
stats_collector_.get()),
sweeper_(&raw_heap_, platform_.get(), stats_collector_.get()),
@@ -86,10 +89,6 @@ HeapBase::NoGCScope::NoGCScope(HeapBase& heap) : heap_(heap) {
HeapBase::NoGCScope::~NoGCScope() { heap_.no_gc_scope_--; }
-void HeapBase::VerifyMarking(cppgc::Heap::StackState stack_state) {
- MarkingVerifier verifier(*this, stack_state);
-}
-
void HeapBase::AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded() {
if (marker_) marker_->AdvanceMarkingOnAllocation();
}
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index efc4dbd40d..f685d94ea5 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -12,6 +12,7 @@
#include "include/cppgc/internal/persistent-node.h"
#include "include/cppgc/macros.h"
#include "src/base/macros.h"
+#include "src/heap/cppgc/compactor.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/object-allocator.h"
#include "src/heap/cppgc/raw-heap.h"
@@ -62,7 +63,8 @@ class V8_EXPORT_PRIVATE HeapBase {
HeapBase& heap_;
};
- HeapBase(std::shared_ptr<cppgc::Platform> platform, size_t custom_spaces,
+ HeapBase(std::shared_ptr<cppgc::Platform> platform,
+ const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
StackSupport stack_support);
virtual ~HeapBase();
@@ -96,6 +98,8 @@ class V8_EXPORT_PRIVATE HeapBase {
MarkerBase* marker() const { return marker_.get(); }
+ Compactor& compactor() { return compactor_; }
+
ObjectAllocator& object_allocator() { return object_allocator_; }
Sweeper& sweeper() { return sweeper_; }
@@ -112,6 +116,18 @@ class V8_EXPORT_PRIVATE HeapBase {
const PersistentRegion& GetWeakPersistentRegion() const {
return weak_persistent_region_;
}
+ PersistentRegion& GetStrongCrossThreadPersistentRegion() {
+ return strong_cross_thread_persistent_region_;
+ }
+ const PersistentRegion& GetStrongCrossThreadPersistentRegion() const {
+ return strong_cross_thread_persistent_region_;
+ }
+ PersistentRegion& GetWeakCrossThreadPersistentRegion() {
+ return weak_cross_thread_persistent_region_;
+ }
+ const PersistentRegion& GetWeakCrossThreadPersistentRegion() const {
+ return weak_cross_thread_persistent_region_;
+ }
#if defined(CPPGC_YOUNG_GENERATION)
std::set<void*>& remembered_slots() { return remembered_slots_; }
@@ -124,8 +140,6 @@ class V8_EXPORT_PRIVATE HeapBase {
void AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
protected:
- void VerifyMarking(cppgc::Heap::StackState);
-
virtual void FinalizeIncrementalGarbageCollectionIfNeeded(
cppgc::Heap::StackState) = 0;
@@ -143,11 +157,14 @@ class V8_EXPORT_PRIVATE HeapBase {
std::unique_ptr<PreFinalizerHandler> prefinalizer_handler_;
std::unique_ptr<MarkerBase> marker_;
+ Compactor compactor_;
ObjectAllocator object_allocator_;
Sweeper sweeper_;
PersistentRegion strong_persistent_region_;
PersistentRegion weak_persistent_region_;
+ PersistentRegion strong_cross_thread_persistent_region_;
+ PersistentRegion weak_cross_thread_persistent_region_;
#if defined(CPPGC_YOUNG_GENERATION)
std::set<void*> remembered_slots_;
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.cc b/deps/v8/src/heap/cppgc/heap-object-header.cc
index ad6c570081..4ed2cf73ba 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.cc
+++ b/deps/v8/src/heap/cppgc/heap-object-header.cc
@@ -27,5 +27,15 @@ void HeapObjectHeader::Finalize() {
}
}
+HeapObjectName HeapObjectHeader::GetName() const {
+ const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
+ return gc_info.name(Payload());
+}
+
+void HeapObjectHeader::Trace(Visitor* visitor) const {
+ const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
+ return gc_info.trace(visitor, Payload());
+}
+
} // namespace internal
} // namespace cppgc
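
The new GetName() and Trace() both resolve their callback through the global GCInfo table using the index stored in the object header, i.e. a table-driven dispatch keyed by GCInfoIndex. A hedged sketch of that shape (type and function names here are illustrative, not cppgc's API):

#include <cstddef>
#include <vector>

using TraceFn = void (*)(void* payload);
using NameFn = const char* (*)(const void* payload);

struct TypeInfo {  // stands in for cppgc::internal::GCInfo
  TraceFn trace;
  NameFn name;
};

// Global registry indexed by the small integer each object header stores.
std::vector<TypeInfo>& Registry() {
  static std::vector<TypeInfo> table(1);  // index 0 reserved
  return table;
}

size_t Register(TypeInfo info) {
  Registry().push_back(info);
  return Registry().size() - 1;
}

// Dispatch as HeapObjectHeader::Trace()/GetName() do: index -> table -> call.
void TraceObject(size_t type_index, void* payload) {
  Registry()[type_index].trace(payload);
}
const char* NameOf(size_t type_index, const void* payload) {
  return Registry()[type_index].name(payload);
}
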
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.h b/deps/v8/src/heap/cppgc/heap-object-header.h
index 1a49701f08..ce850453b6 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header.h
@@ -11,6 +11,7 @@
#include "include/cppgc/allocation.h"
#include "include/cppgc/internal/gc-info.h"
+#include "include/cppgc/internal/name-trait.h"
#include "src/base/atomic-utils.h"
#include "src/base/bit-field.h"
#include "src/base/logging.h"
@@ -19,6 +20,9 @@
#include "src/heap/cppgc/globals.h"
namespace cppgc {
+
+class Visitor;
+
namespace internal {
// HeapObjectHeader contains meta data per object and is prepended to each
@@ -49,8 +53,6 @@ namespace internal {
// to allow potentially accessing them non-atomically.
class HeapObjectHeader {
public:
- enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
-
static constexpr size_t kSizeLog2 = 17;
static constexpr size_t kMaxSize = (size_t{1} << kSizeLog2) - 1;
static constexpr uint16_t kLargeObjectSizeInHeader = 0;
@@ -93,6 +95,10 @@ class HeapObjectHeader {
inline bool IsFinalizable() const;
void Finalize();
+ V8_EXPORT_PRIVATE HeapObjectName GetName() const;
+
+ V8_EXPORT_PRIVATE void Trace(Visitor*) const;
+
private:
enum class EncodedHalf : uint8_t { kLow, kHigh };
@@ -152,8 +158,16 @@ HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1));
DCHECK_GE(kMaxSize, size);
- encoded_high_ = GCInfoIndexField::encode(gc_info_index);
encoded_low_ = EncodeSize(size);
+ // Objects may get published to the marker without any other synchronization
+ // (e.g., write barrier), in which case the in-construction bit is read
+ // concurrently, which requires reading encoded_high_ atomically. It is ok if
+ // this write is not observed by the marker, since the sweeper sets the
+ // in-construction bit to 0 and we can rely on that to guarantee a correct
+ // answer when checking if objects are in-construction.
+ v8::base::AsAtomicPtr(&encoded_high_)
+ ->store(GCInfoIndexField::encode(gc_info_index),
+ std::memory_order_relaxed);
DCHECK(IsInConstruction());
#ifdef DEBUG
CheckApiConstants();
@@ -165,14 +179,14 @@ Address HeapObjectHeader::Payload() const {
sizeof(HeapObjectHeader);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
GCInfoIndex HeapObjectHeader::GetGCInfoIndex() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
return GCInfoIndexField::decode(encoded);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
size_t HeapObjectHeader::GetSize() const {
// Size is immutable after construction while either marking or sweeping
// is running so relaxed load (if mode == kAtomic) is enough.
@@ -187,12 +201,12 @@ void HeapObjectHeader::SetSize(size_t size) {
encoded_low_ |= EncodeSize(size);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
bool HeapObjectHeader::IsLargeObject() const {
return GetSize<mode>() == kLargeObjectSizeInHeader;
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
bool HeapObjectHeader::IsInConstruction() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
@@ -203,14 +217,14 @@ void HeapObjectHeader::MarkAsFullyConstructed() {
MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(Payload());
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
bool HeapObjectHeader::IsMarked() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
return MarkBitField::decode(encoded);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
void HeapObjectHeader::Unmark() {
DCHECK(IsMarked<mode>());
StoreEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>(
@@ -228,14 +242,14 @@ bool HeapObjectHeader::TryMarkAtomic() {
std::memory_order_relaxed);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
bool HeapObjectHeader::IsYoung() const {
return !IsMarked<mode>();
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
bool HeapObjectHeader::IsFree() const {
- return GetGCInfoIndex() == kFreeListGCInfoIndex;
+ return GetGCInfoIndex<mode>() == kFreeListGCInfoIndex;
}
bool HeapObjectHeader::IsFinalizable() const {
@@ -243,7 +257,7 @@ bool HeapObjectHeader::IsFinalizable() const {
return gc_info.finalize;
}
-template <HeapObjectHeader::AccessMode mode, HeapObjectHeader::EncodedHalf part,
+template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
std::memory_order memory_order>
uint16_t HeapObjectHeader::LoadEncoded() const {
const uint16_t& half =
@@ -252,7 +266,7 @@ uint16_t HeapObjectHeader::LoadEncoded() const {
return v8::base::AsAtomicPtr(&half)->load(memory_order);
}
-template <HeapObjectHeader::AccessMode mode, HeapObjectHeader::EncodedHalf part,
+template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
std::memory_order memory_order>
void HeapObjectHeader::StoreEncoded(uint16_t bits, uint16_t mask) {
// Caveat: Not all changes to HeapObjectHeader's bitfields go through
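
The constructor comment in this hunk argues that a relaxed atomic store of the high half is enough to publish the type index while the in-construction bit may be read concurrently. A standalone sketch of that publish/read pairing (the bit layout is an assumption for illustration, not the real header encoding):

#include <atomic>
#include <cstdint>

// Publish/read pairing for a header half that a concurrent marker may read.
struct HeaderHalf {
  std::atomic<uint16_t> encoded_high{0};
  static constexpr uint16_t kFullyConstructedBit = 1u << 15;  // assumed layout

  void Publish(uint16_t gc_info_index_bits) {
    // Relaxed is sufficient: a marker that misses this store still reads a
    // consistent 16-bit value, and the sweeper is the component that
    // authoritatively clears the in-construction state.
    encoded_high.store(gc_info_index_bits, std::memory_order_relaxed);
  }

  bool IsInConstruction() const {
    // Concurrent readers use an acquire load of the same half.
    return (encoded_high.load(std::memory_order_acquire) &
            kFullyConstructedBit) == 0;
  }
};
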
diff --git a/deps/v8/src/heap/cppgc/heap-page.cc b/deps/v8/src/heap/cppgc/heap-page.cc
index 1ac7fe7fee..b2b3d83182 100644
--- a/deps/v8/src/heap/cppgc/heap-page.cc
+++ b/deps/v8/src/heap/cppgc/heap-page.cc
@@ -112,6 +112,7 @@ NormalPage* NormalPage::Create(PageBackend* page_backend,
DCHECK_NOT_NULL(space);
void* memory = page_backend->AllocateNormalPageMemory(space->index());
auto* normal_page = new (memory) NormalPage(space->raw_heap()->heap(), space);
+ normal_page->SynchronizedStore();
return normal_page;
}
@@ -189,6 +190,7 @@ LargePage* LargePage::Create(PageBackend* page_backend, LargePageSpace* space,
auto* heap = space->raw_heap()->heap();
void* memory = page_backend->AllocateLargePageMemory(allocation_size);
LargePage* page = new (memory) LargePage(heap, space, size);
+ page->SynchronizedStore();
return page;
}
diff --git a/deps/v8/src/heap/cppgc/heap-page.h b/deps/v8/src/heap/cppgc/heap-page.h
index fbf6059ad7..bc3762b4ae 100644
--- a/deps/v8/src/heap/cppgc/heap-page.h
+++ b/deps/v8/src/heap/cppgc/heap-page.h
@@ -48,11 +48,9 @@ class V8_EXPORT_PRIVATE BasePage {
ConstAddress PayloadEnd() const;
// |address| must refer to real object.
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
HeapObjectHeader& ObjectHeaderFromInnerAddress(void* address) const;
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
const HeapObjectHeader& ObjectHeaderFromInnerAddress(
const void* address) const;
@@ -63,8 +61,24 @@ class V8_EXPORT_PRIVATE BasePage {
const HeapObjectHeader* TryObjectHeaderFromInnerAddress(
const void* address) const;
+ // SynchronizedLoad and SynchronizedStore are used to sync pages after they
+ // are allocated. std::atomic_thread_fence is sufficient in practice but is
+ // not recognized by tsan. Atomic load and store of the |type_| field are
+ // added for tsan builds.
+ void SynchronizedLoad() const {
+#if defined(THREAD_SANITIZER)
+ v8::base::AsAtomicPtr(&type_)->load(std::memory_order_acquire);
+#endif
+ }
+ void SynchronizedStore() {
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+#if defined(THREAD_SANITIZER)
+ v8::base::AsAtomicPtr(&type_)->store(type_, std::memory_order_release);
+#endif
+ }
+
protected:
- enum class PageType { kNormal, kLarge };
+ enum class PageType : uint8_t { kNormal, kLarge };
BasePage(HeapBase*, BaseSpace*, PageType);
private:
@@ -221,8 +235,7 @@ const BasePage* BasePage::FromPayload(const void* payload) {
kGuardPageSize);
}
-template <HeapObjectHeader::AccessMode mode =
- HeapObjectHeader::AccessMode::kNonAtomic>
+template <AccessMode mode = AccessMode::kNonAtomic>
const HeapObjectHeader* ObjectHeaderFromInnerAddressImpl(const BasePage* page,
const void* address) {
if (page->is_large()) {
@@ -232,21 +245,27 @@ const HeapObjectHeader* ObjectHeaderFromInnerAddressImpl(const BasePage* page,
NormalPage::From(page)->object_start_bitmap();
const HeapObjectHeader* header =
bitmap.FindHeader<mode>(static_cast<ConstAddress>(address));
- DCHECK_LT(address,
- reinterpret_cast<ConstAddress>(header) +
- header->GetSize<HeapObjectHeader::AccessMode::kAtomic>());
+ DCHECK_LT(address, reinterpret_cast<ConstAddress>(header) +
+ header->GetSize<AccessMode::kAtomic>());
return header;
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(void* address) const {
return const_cast<HeapObjectHeader&>(
ObjectHeaderFromInnerAddress<mode>(const_cast<const void*>(address)));
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
const HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(
const void* address) const {
+ // This method might be called for |address| found via a Trace method of
+ // another object. If |address| is on a newly allocated page, there will
+ // be no sync between the page allocation and a concurrent marking thread,
+ // resulting in a race with page initialization (specifically with writing
+ // the page |type_| field). This can occur when tracing a Member holding a
+ // reference to a mixin type.
+ SynchronizedLoad();
const HeapObjectHeader* header =
ObjectHeaderFromInnerAddressImpl<mode>(this, address);
DCHECK_NE(kFreeListGCInfoIndex, header->GetGCInfoIndex());
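
The Synchronized{Store,Load} pair added above relies on a seq_cst fence for the actual ordering and tags the |type_| field with atomic accesses only so TSAN can see the happens-before edge. The same pattern in isolation (illustrative sketch, not the patched class):

#include <atomic>
#include <cstdint>

// Publish a freshly initialized object to other threads.
struct PageLikeObject {
  uint8_t type = 0;

  void SynchronizedStore() {
    // The fence is what actually orders the preceding initialization writes.
    std::atomic_thread_fence(std::memory_order_seq_cst);
#if defined(THREAD_SANITIZER)
    // TSAN does not model fences, so tag one field with an atomic store to
    // make the synchronization visible to the checker.
    reinterpret_cast<std::atomic<uint8_t>*>(&type)->store(
        type, std::memory_order_release);
#endif
  }

  void SynchronizedLoad() const {
#if defined(THREAD_SANITIZER)
    reinterpret_cast<const std::atomic<uint8_t>*>(&type)->load(
        std::memory_order_acquire);
#endif
  }
};
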
diff --git a/deps/v8/src/heap/cppgc/heap-space.cc b/deps/v8/src/heap/cppgc/heap-space.cc
index 7b15ba2254..9a78b44433 100644
--- a/deps/v8/src/heap/cppgc/heap-space.cc
+++ b/deps/v8/src/heap/cppgc/heap-space.cc
@@ -14,8 +14,11 @@
namespace cppgc {
namespace internal {
-BaseSpace::BaseSpace(RawHeap* heap, size_t index, PageType type)
- : heap_(heap), index_(index), type_(type) {}
+BaseSpace::BaseSpace(RawHeap* heap, size_t index, PageType type,
+ bool is_compactable)
+ : heap_(heap), index_(index), type_(type), is_compactable_(is_compactable) {
+ USE(is_compactable_);
+}
void BaseSpace::AddPage(BasePage* page) {
v8::base::LockGuard<v8::base::Mutex> lock(&pages_mutex_);
@@ -36,11 +39,12 @@ BaseSpace::Pages BaseSpace::RemoveAllPages() {
return pages;
}
-NormalPageSpace::NormalPageSpace(RawHeap* heap, size_t index)
- : BaseSpace(heap, index, PageType::kNormal) {}
+NormalPageSpace::NormalPageSpace(RawHeap* heap, size_t index,
+ bool is_compactable)
+ : BaseSpace(heap, index, PageType::kNormal, is_compactable) {}
LargePageSpace::LargePageSpace(RawHeap* heap, size_t index)
- : BaseSpace(heap, index, PageType::kLarge) {}
+ : BaseSpace(heap, index, PageType::kLarge, false /* is_compactable */) {}
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/heap-space.h b/deps/v8/src/heap/cppgc/heap-space.h
index a7e50d4f48..ac6dbba65c 100644
--- a/deps/v8/src/heap/cppgc/heap-space.h
+++ b/deps/v8/src/heap/cppgc/heap-space.h
@@ -47,9 +47,12 @@ class V8_EXPORT_PRIVATE BaseSpace {
void RemovePage(BasePage*);
Pages RemoveAllPages();
+ bool is_compactable() const { return is_compactable_; }
+
protected:
enum class PageType { kNormal, kLarge };
- explicit BaseSpace(RawHeap* heap, size_t index, PageType type);
+ explicit BaseSpace(RawHeap* heap, size_t index, PageType type,
+ bool is_compactable);
private:
RawHeap* heap_;
@@ -57,6 +60,7 @@ class V8_EXPORT_PRIVATE BaseSpace {
v8::base::Mutex pages_mutex_;
const size_t index_;
const PageType type_;
+ const bool is_compactable_;
};
class V8_EXPORT_PRIVATE NormalPageSpace final : public BaseSpace {
@@ -92,7 +96,7 @@ class V8_EXPORT_PRIVATE NormalPageSpace final : public BaseSpace {
return From(const_cast<BaseSpace*>(space));
}
- NormalPageSpace(RawHeap* heap, size_t index);
+ NormalPageSpace(RawHeap* heap, size_t index, bool is_compactable);
LinearAllocationBuffer& linear_allocation_buffer() { return current_lab_; }
const LinearAllocationBuffer& linear_allocation_buffer() const {
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index 0db04fb537..3da59fd1ee 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -10,6 +10,7 @@
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/marking-verifier.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
namespace cppgc {
@@ -77,7 +78,7 @@ void CheckConfig(Heap::Config config) {
Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
cppgc::Heap::HeapOptions options)
- : HeapBase(platform, options.custom_spaces.size(), options.stack_support),
+ : HeapBase(platform, options.custom_spaces, options.stack_support),
gc_invoker_(this, platform_.get(), options.stack_support),
growing_(&gc_invoker_, stats_collector_.get(),
options.resource_constraints) {}
@@ -151,21 +152,26 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
DCHECK(!in_no_gc_scope());
config_.stack_state = stack_state;
DCHECK(marker_);
- marker_->FinishMarking(stack_state);
{
- // Pre finalizers are forbidden from allocating objects.
+ // Pre finalizers are forbidden from allocating objects. Note that this also
+ // guard atomic pause marking below, meaning that no internal method or
+ // external callbacks are allowed to allocate new objects.
ObjectAllocator::NoAllocationScope no_allocation_scope_(object_allocator_);
- marker_->ProcessWeakness();
+ marker_->FinishMarking(stack_state);
prefinalizer_handler_->InvokePreFinalizers();
}
marker_.reset();
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
- VerifyMarking(stack_state);
+ MarkingVerifier verifier(*this);
+ verifier.Run(stack_state);
#endif
{
NoGCScope no_gc(*this);
- sweeper_.Start(config_.sweeping_type);
+ const Sweeper::SweepingConfig sweeping_config{
+ config_.sweeping_type,
+ Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep};
+ sweeper_.Start(sweeping_config);
}
gc_in_progress_ = false;
}
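
FinalizeGarbageCollection now runs both FinishMarking() and the prefinalizers inside a single ObjectAllocator::NoAllocationScope, so neither internal marking code nor external callbacks can allocate during the atomic pause. A minimal sketch of such a counter-based scope guard (assumed simplified shape, not the real allocator):

#include <cassert>
#include <cstddef>

class AllocatorLike {
 public:
  class NoAllocationScope {
   public:
    explicit NoAllocationScope(AllocatorLike& allocator)
        : allocator_(allocator) {
      ++allocator_.no_allocation_depth_;
    }
    ~NoAllocationScope() { --allocator_.no_allocation_depth_; }

   private:
    AllocatorLike& allocator_;
  };

  void* Allocate(size_t) {
    // Any allocation attempt inside the scope trips this check.
    assert(no_allocation_depth_ == 0 && "allocation forbidden in this scope");
    return nullptr;  // real allocation elided
  }

 private:
  int no_allocation_depth_ = 0;
};

// Usage:
//   AllocatorLike allocator;
//   AllocatorLike::NoAllocationScope scope(allocator);
//   // FinishMarking() and prefinalizers would run here; Allocate() asserts.
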
diff --git a/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc b/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
index 7e1ff951ab..cef34b1efe 100644
--- a/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
+++ b/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
@@ -11,6 +11,9 @@
namespace cppgc {
namespace internal {
+// static
+constexpr size_t IncrementalMarkingSchedule::kInvalidLastEstimatedLiveBytes;
+
const double IncrementalMarkingSchedule::kEstimatedMarkingTimeMs = 500.0;
const size_t IncrementalMarkingSchedule::kMinimumMarkedBytesPerIncrementalStep =
64 * kKB;
@@ -32,9 +35,12 @@ void IncrementalMarkingSchedule::AddConcurrentlyMarkedBytes(
concurrently_marked_bytes_.fetch_add(marked_bytes, std::memory_order_relaxed);
}
-size_t IncrementalMarkingSchedule::GetOverallMarkedBytes() {
- return incrementally_marked_bytes_ +
- concurrently_marked_bytes_.load(std::memory_order_relaxed);
+size_t IncrementalMarkingSchedule::GetOverallMarkedBytes() const {
+ return incrementally_marked_bytes_ + GetConcurrentlyMarkedBytes();
+}
+
+size_t IncrementalMarkingSchedule::GetConcurrentlyMarkedBytes() const {
+ return concurrently_marked_bytes_.load(std::memory_order_relaxed);
}
double IncrementalMarkingSchedule::GetElapsedTimeInMs(
@@ -49,6 +55,7 @@ double IncrementalMarkingSchedule::GetElapsedTimeInMs(
size_t IncrementalMarkingSchedule::GetNextIncrementalStepDuration(
size_t estimated_live_bytes) {
+ last_estimated_live_bytes_ = estimated_live_bytes;
DCHECK(!incremental_marking_start_time_.IsNull());
double elapsed_time_in_ms =
GetElapsedTimeInMs(incremental_marking_start_time_);
@@ -70,5 +77,17 @@ size_t IncrementalMarkingSchedule::GetNextIncrementalStepDuration(
expected_marked_bytes - actual_marked_bytes);
}
+constexpr double
+ IncrementalMarkingSchedule::kEphemeronPairsFlushingRatioIncrements;
+bool IncrementalMarkingSchedule::ShouldFlushEphemeronPairs() {
+ DCHECK_NE(kInvalidLastEstimatedLiveBytes, last_estimated_live_bytes_);
+ if (GetOverallMarkedBytes() <
+ (ephemeron_pairs_flushing_ratio_target * last_estimated_live_bytes_))
+ return false;
+ ephemeron_pairs_flushing_ratio_target +=
+ kEphemeronPairsFlushingRatioIncrements;
+ return true;
+}
+
} // namespace internal
} // namespace cppgc
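
ShouldFlushEphemeronPairs() flushes discovered ephemeron pairs each time the overall marked bytes cross the next quarter of the last live-bytes estimate, stepping the target by kEphemeronPairsFlushingRatioIncrements. A small worked example with assumed numbers:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t estimated_live_bytes = 1000;  // assumed last estimate
  double ratio_target = 0.25;                // first flush at 25%
  const double increment = 0.25;

  for (size_t marked : {100u, 300u, 600u, 900u}) {
    const bool flush = marked >= ratio_target * estimated_live_bytes;
    std::printf("marked=%zu target=%.0f flush=%d\n", marked,
                ratio_target * estimated_live_bytes, flush);
    if (flush) ratio_target += increment;  // next flush at the next quarter
  }
  // Output: no flush at 100, flush at 300 (>=250), 600 (>=500), 900 (>=750).
}
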
diff --git a/deps/v8/src/heap/cppgc/incremental-marking-schedule.h b/deps/v8/src/heap/cppgc/incremental-marking-schedule.h
index 3c8a9e1a01..a9a0f7d840 100644
--- a/deps/v8/src/heap/cppgc/incremental-marking-schedule.h
+++ b/deps/v8/src/heap/cppgc/incremental-marking-schedule.h
@@ -26,7 +26,8 @@ class V8_EXPORT_PRIVATE IncrementalMarkingSchedule {
void UpdateIncrementalMarkedBytes(size_t);
void AddConcurrentlyMarkedBytes(size_t);
- size_t GetOverallMarkedBytes();
+ size_t GetOverallMarkedBytes() const;
+ size_t GetConcurrentlyMarkedBytes() const;
size_t GetNextIncrementalStepDuration(size_t);
@@ -34,6 +35,8 @@ class V8_EXPORT_PRIVATE IncrementalMarkingSchedule {
elapsed_time_for_testing_ = elapsed_time;
}
+ bool ShouldFlushEphemeronPairs();
+
private:
double GetElapsedTimeInMs(v8::base::TimeTicks);
@@ -45,6 +48,11 @@ class V8_EXPORT_PRIVATE IncrementalMarkingSchedule {
// Using -1 as a sentinel to denote that no elapsed time was set for testing.
static constexpr double kNoSetElapsedTimeForTesting = -1;
double elapsed_time_for_testing_ = kNoSetElapsedTimeForTesting;
+
+ static constexpr size_t kInvalidLastEstimatedLiveBytes = -1;
+ size_t last_estimated_live_bytes_ = kInvalidLastEstimatedLiveBytes;
+ double ephemeron_pairs_flushing_ratio_target = 0.25;
+ static constexpr double kEphemeronPairsFlushingRatioIncrements = 0.25;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index 0d044588b6..236bc12af4 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -15,6 +15,7 @@
#include "src/heap/cppgc/liveness-broker.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/process-heap.h"
#include "src/heap/cppgc/stats-collector.h"
#if defined(CPPGC_CAGED_HEAP)
@@ -55,7 +56,8 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
}
// Visit remembered set that was recorded in the generational barrier.
-void VisitRememberedSlots(HeapBase& heap, MarkingState& marking_state) {
+void VisitRememberedSlots(HeapBase& heap,
+ MutatorMarkingState& mutator_marking_state) {
#if defined(CPPGC_YOUNG_GENERATION)
for (void* slot : heap.remembered_slots()) {
auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
@@ -65,11 +67,10 @@ void VisitRememberedSlots(HeapBase& heap, MarkingState& marking_state) {
// top level (with the guarantee that no objects are currently being in
// construction). This can be ensured by running young GCs from safe points
// or by reintroducing nested allocation scopes that avoid finalization.
- DCHECK(
- !header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>());
+ DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
void* value = *reinterpret_cast<void**>(slot);
- marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
+ mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
}
#endif
}
@@ -86,32 +87,13 @@ void ResetRememberedSet(HeapBase& heap) {
static constexpr size_t kDefaultDeadlineCheckInterval = 150u;
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
- typename WorklistLocal, typename Callback, typename Predicate>
-bool DrainWorklistWithDeadline(Predicate should_yield,
- WorklistLocal& worklist_local,
- Callback callback) {
- size_t processed_callback_count = 0;
- typename WorklistLocal::ItemType item;
- while (worklist_local.Pop(&item)) {
- callback(item);
- if (processed_callback_count-- == 0) {
- if (should_yield()) {
- return false;
- }
- processed_callback_count = kDeadlineCheckInterval;
- }
- }
- return true;
-}
-
-template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
typename WorklistLocal, typename Callback>
-bool DrainWorklistWithBytesAndTimeDeadline(MarkingState& marking_state,
+bool DrainWorklistWithBytesAndTimeDeadline(MarkingStateBase& marking_state,
size_t marked_bytes_deadline,
v8::base::TimeTicks time_deadline,
WorklistLocal& worklist_local,
Callback callback) {
- return DrainWorklistWithDeadline(
+ return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
[&marking_state, marked_bytes_deadline, time_deadline]() {
return (marked_bytes_deadline <= marking_state.marked_bytes()) ||
(time_deadline <= v8::base::TimeTicks::Now());
@@ -119,15 +101,6 @@ bool DrainWorklistWithBytesAndTimeDeadline(MarkingState& marking_state,
worklist_local, callback);
}
-void TraceMarkedObject(Visitor* visitor, const HeapObjectHeader* header) {
- DCHECK(header);
- DCHECK(!header->IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>());
- DCHECK(header->IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
- const GCInfo& gcinfo =
- GlobalGCInfoTable::GCInfoFromIndex(header->GetGCInfoIndex());
- gcinfo.trace(visitor, header->Payload());
-}
-
size_t GetNextIncrementalStepDuration(IncrementalMarkingSchedule& schedule,
HeapBase& heap) {
return schedule.GetNextIncrementalStepDuration(
@@ -150,7 +123,7 @@ MarkerBase::IncrementalMarkingTask::Post(cppgc::TaskRunner* runner,
MarkerBase* marker) {
// Incremental GC is possible only via the GCInvoker, so getting here
// guarantees that either non-nestable tasks or conservative stack
- // scannnig are supported. This is required so that the incremental
+ // scanning are supported. This is required so that the incremental
// task can safely finalize GC if needed.
DCHECK_IMPLIES(marker->heap().stack_support() !=
HeapBase::StackSupport::kSupportsConservativeStackScan,
@@ -185,7 +158,8 @@ MarkerBase::MarkerBase(Key, HeapBase& heap, cppgc::Platform* platform,
config_(config),
platform_(platform),
foreground_task_runner_(platform_->GetForegroundTaskRunner()),
- mutator_marking_state_(heap, marking_worklists_) {}
+ mutator_marking_state_(heap, marking_worklists_,
+ heap.compactor().compaction_worklists()) {}
MarkerBase::~MarkerBase() {
// The fixed point iteration may have found not-fully-constructed objects.
@@ -194,19 +168,33 @@ MarkerBase::~MarkerBase() {
if (!marking_worklists_.not_fully_constructed_worklist()->IsEmpty()) {
#if DEBUG
DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state);
- HeapObjectHeader* header;
- MarkingWorklists::NotFullyConstructedWorklist::Local& local =
- mutator_marking_state_.not_fully_constructed_worklist();
- while (local.Pop(&header)) {
- DCHECK(header->IsMarked());
- }
+ std::unordered_set<HeapObjectHeader*> objects =
+ mutator_marking_state_.not_fully_constructed_worklist().Extract();
+ for (HeapObjectHeader* object : objects) DCHECK(object->IsMarked());
#else
marking_worklists_.not_fully_constructed_worklist()->Clear();
#endif
}
+
+ // |discovered_ephemeron_pairs_worklist_| may still hold ephemeron pairs with
+ // dead keys.
+ if (!marking_worklists_.discovered_ephemeron_pairs_worklist()->IsEmpty()) {
+#if DEBUG
+ MarkingWorklists::EphemeronPairItem item;
+ while (mutator_marking_state_.discovered_ephemeron_pairs_worklist().Pop(
+ &item)) {
+ DCHECK(!HeapObjectHeader::FromPayload(item.key).IsMarked());
+ }
+#else
+ marking_worklists_.discovered_ephemeron_pairs_worklist()->Clear();
+#endif
+ }
+
+ marking_worklists_.weak_containers_worklist()->Clear();
}
void MarkerBase::StartMarking() {
+ DCHECK(!is_marking_started_);
heap().stats_collector()->NotifyMarkingStarted();
is_marking_started_ = true;
@@ -216,21 +204,34 @@ void MarkerBase::StartMarking() {
// Scanning the stack is expensive so we only do it at the atomic pause.
VisitRoots(MarkingConfig::StackState::kNoHeapPointers);
ScheduleIncrementalMarkingTask();
+ if (config_.marking_type ==
+ MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ mutator_marking_state_.Publish();
+ concurrent_marker_->Start();
+ }
}
}
void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
- // Cancel remaining incremental tasks.
- if (incremental_marking_handle_) incremental_marking_handle_.Cancel();
+ // Cancel remaining concurrent/incremental tasks.
+ concurrent_marker_->Cancel();
+ incremental_marking_handle_.Cancel();
}
config_.stack_state = stack_state;
config_.marking_type = MarkingConfig::MarkingType::kAtomic;
+ // Lock guards against changes to {Weak}CrossThreadPersistent handles that
+ // may conflict with marking. E.g., a WeakCrossThreadPersistent may be
+ // converted into a CrossThreadPersistent, which requires that the handle
+ // is either cleared or the object is retained.
+ g_process_mutex.Pointer()->Lock();
+
// VisitRoots also resets the LABs.
VisitRoots(config_.stack_state);
if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
mutator_marking_state_.FlushNotFullyConstructedObjects();
+ DCHECK(marking_worklists_.not_fully_constructed_worklist()->IsEmpty());
} else {
MarkNotFullyConstructedObjects();
}
@@ -242,20 +243,26 @@ void MarkerBase::LeaveAtomicPause() {
heap().stats_collector()->NotifyMarkingCompleted(
// GetOverallMarkedBytes also includes concurrently marked bytes.
schedule_.GetOverallMarkedBytes());
+ is_marking_started_ = false;
+ ProcessWeakness();
+ g_process_mutex.Pointer()->Unlock();
}
void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
DCHECK(is_marking_started_);
EnterAtomicPause(stack_state);
- ProcessWorklistsWithDeadline(std::numeric_limits<size_t>::max(),
- v8::base::TimeTicks::Max());
+ CHECK(ProcessWorklistsWithDeadline(std::numeric_limits<size_t>::max(),
+ v8::base::TimeTicks::Max()));
mutator_marking_state_.Publish();
LeaveAtomicPause();
- is_marking_started_ = false;
}
void MarkerBase::ProcessWeakness() {
+ DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
heap().GetWeakPersistentRegion().Trace(&visitor());
+ // Processing cross-thread handles requires taking the process lock.
+ g_process_mutex.Get().AssertHeld();
+ heap().GetWeakCrossThreadPersistentRegion().Trace(&visitor());
// Call weak callbacks on objects that may now be pointing to dead objects.
MarkingWorklists::WeakCallbackItem item;
@@ -275,6 +282,10 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
heap().object_allocator().ResetLinearAllocationBuffers();
heap().GetStrongPersistentRegion().Trace(&visitor());
+ if (config_.marking_type == MarkingConfig::MarkingType::kAtomic) {
+ g_process_mutex.Get().AssertHeld();
+ heap().GetStrongCrossThreadPersistentRegion().Trace(&visitor());
+ }
if (stack_state != MarkingConfig::StackState::kNoHeapPointers) {
heap().stack()->IteratePointers(&stack_visitor());
}
@@ -284,8 +295,8 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
}
void MarkerBase::ScheduleIncrementalMarkingTask() {
- if (!platform_ || !foreground_task_runner_ || incremental_marking_handle_)
- return;
+ DCHECK(platform_);
+ if (!foreground_task_runner_ || incremental_marking_handle_) return;
incremental_marking_handle_ =
IncrementalMarkingTask::Post(foreground_task_runner_.get(), this);
}
@@ -304,13 +315,11 @@ bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
return AdvanceMarkingWithDeadline();
}
-bool MarkerBase::AdvanceMarkingOnAllocation() {
- bool is_done = AdvanceMarkingWithDeadline();
- if (is_done) {
+void MarkerBase::AdvanceMarkingOnAllocation() {
+ if (AdvanceMarkingWithDeadline()) {
// Schedule another incremental task for finalizing without a stack.
ScheduleIncrementalMarkingTask();
}
- return is_done;
}
bool MarkerBase::AdvanceMarkingWithMaxDuration(
@@ -326,29 +335,51 @@ bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta max_duration) {
is_done = ProcessWorklistsWithDeadline(
mutator_marking_state_.marked_bytes() + step_size_in_bytes,
v8::base::TimeTicks::Now() + max_duration);
+ schedule_.UpdateIncrementalMarkedBytes(
+ mutator_marking_state_.marked_bytes());
}
- schedule_.UpdateIncrementalMarkedBytes(mutator_marking_state_.marked_bytes());
+ mutator_marking_state_.Publish();
if (!is_done) {
// If marking is atomic, |is_done| should always be true.
DCHECK_NE(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
ScheduleIncrementalMarkingTask();
+ if (config_.marking_type ==
+ MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
+ }
}
- mutator_marking_state_.Publish();
return is_done;
}
bool MarkerBase::ProcessWorklistsWithDeadline(
size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
do {
- // Convert |previously_not_fully_constructed_worklist_| to
- // |marking_worklist_|. This merely re-adds items with the proper
- // callbacks.
+ if ((config_.marking_type == MarkingConfig::MarkingType::kAtomic) ||
+ schedule_.ShouldFlushEphemeronPairs()) {
+ mutator_marking_state_.FlushDiscoveredEphemeronPairs();
+ }
+
+ // Bailout objects may be complicated to trace and thus might take longer
+ // than other objects. Therefore we reduce the interval between deadline
+ // checks to guarantee the deadline is not exceeded.
+ if (!DrainWorklistWithBytesAndTimeDeadline<kDefaultDeadlineCheckInterval /
+ 5>(
+ mutator_marking_state_, marked_bytes_deadline, time_deadline,
+ mutator_marking_state_.concurrent_marking_bailout_worklist(),
+ [this](const MarkingWorklists::ConcurrentMarkingBailoutItem& item) {
+ mutator_marking_state_.AccountMarkedBytes(item.bailedout_size);
+ item.callback(&visitor(), item.parameter);
+ })) {
+ return false;
+ }
+
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.previously_not_fully_constructed_worklist(),
[this](HeapObjectHeader* header) {
- TraceMarkedObject(&visitor(), header);
mutator_marking_state_.AccountMarkedBytes(*header);
+ DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
+ *header);
})) {
return false;
}
@@ -359,12 +390,10 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
[this](const MarkingWorklists::MarkingItem& item) {
const HeapObjectHeader& header =
HeapObjectHeader::FromPayload(item.base_object_payload);
- DCHECK(!header.IsInConstruction<
- HeapObjectHeader::AccessMode::kNonAtomic>());
- DCHECK(
- header.IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
- item.callback(&visitor(), item.base_object_payload);
+ DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
+ DCHECK(header.IsMarked<AccessMode::kNonAtomic>());
mutator_marking_state_.AccountMarkedBytes(header);
+ item.callback(&visitor(), item.base_object_payload);
})) {
return false;
}
@@ -373,8 +402,19 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.write_barrier_worklist(),
[this](HeapObjectHeader* header) {
- TraceMarkedObject(&visitor(), header);
mutator_marking_state_.AccountMarkedBytes(*header);
+ DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
+ *header);
+ })) {
+ return false;
+ }
+
+ if (!DrainWorklistWithBytesAndTimeDeadline(
+ mutator_marking_state_, marked_bytes_deadline, time_deadline,
+ mutator_marking_state_.ephemeron_pairs_for_processing_worklist(),
+ [this](const MarkingWorklists::EphemeronPairItem& item) {
+ mutator_marking_state_.ProcessEphemeron(item.key,
+ item.value_desc);
})) {
return false;
}
@@ -383,32 +423,47 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
}
void MarkerBase::MarkNotFullyConstructedObjects() {
- HeapObjectHeader* header;
- MarkingWorklists::NotFullyConstructedWorklist::Local& local =
- mutator_marking_state_.not_fully_constructed_worklist();
- while (local.Pop(&header)) {
- DCHECK(header);
- DCHECK(header->IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
+ std::unordered_set<HeapObjectHeader*> objects =
+ mutator_marking_state_.not_fully_constructed_worklist().Extract();
+ for (HeapObjectHeader* object : objects) {
+ DCHECK(object);
+ if (!mutator_marking_state_.MarkNoPush(*object)) continue;
// TraceConservativelyIfNeeded will either push to a worklist
// or trace conservatively and call AccountMarkedBytes.
- conservative_visitor().TraceConservativelyIfNeeded(*header);
+ conservative_visitor().TraceConservativelyIfNeeded(*object);
}
}
void MarkerBase::ClearAllWorklistsForTesting() {
marking_worklists_.ClearForTesting();
+ auto* compaction_worklists = heap_.compactor().compaction_worklists();
+ if (compaction_worklists) compaction_worklists->ClearForTesting();
}
void MarkerBase::DisableIncrementalMarkingForTesting() {
incremental_marking_disabled_for_testing_ = true;
}
+void MarkerBase::WaitForConcurrentMarkingForTesting() {
+ concurrent_marker_->JoinForTesting();
+}
+
+void MarkerBase::NotifyCompactionCancelled() {
+ // Compaction cannot be cancelled while concurrent marking is active.
+ DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
+ DCHECK_IMPLIES(concurrent_marker_, !concurrent_marker_->IsActive());
+ mutator_marking_state_.NotifyCompactionCancelled();
+}
+
Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
MarkingConfig config)
: MarkerBase(key, heap, platform, config),
marking_visitor_(heap, mutator_marking_state_),
conservative_marking_visitor_(heap, mutator_marking_state_,
- marking_visitor_) {}
+ marking_visitor_) {
+ concurrent_marker_ = std::make_unique<ConcurrentMarker>(
+ heap_, marking_worklists_, schedule_, platform_);
+}
} // namespace internal
} // namespace cppgc
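
DrainWorklistWithBytesAndTimeDeadline (and the interval/5 variant used for bailout objects) amortizes deadline checks by consulting the yield predicate only every fixed number of processed items. A generic sketch of that drain loop (not the worklist types used in this patch):

#include <cstddef>
#include <deque>
#include <functional>

// Drain `items`, but only evaluate the (possibly expensive) yield predicate
// every `check_interval` processed items.
template <typename T>
bool DrainWithDeadline(std::deque<T>& items,
                       const std::function<void(const T&)>& process,
                       const std::function<bool()>& should_yield,
                       size_t check_interval = 150) {
  size_t until_check = check_interval;
  while (!items.empty()) {
    process(items.front());
    items.pop_front();
    if (--until_check == 0) {
      if (should_yield()) return false;  // deadline hit; caller reschedules
      until_check = check_interval;
    }
  }
  return true;  // worklist fully drained
}
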
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
index 47ce9998b4..85a8027206 100644
--- a/deps/v8/src/heap/cppgc/marker.h
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -12,6 +12,7 @@
#include "src/base/macros.h"
#include "src/base/platform/time.h"
#include "src/heap/base/worklist.h"
+#include "src/heap/cppgc/concurrent-marker.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/incremental-marking-schedule.h"
#include "src/heap/cppgc/marking-state.h"
@@ -73,7 +74,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
bool AdvanceMarkingWithMaxDuration(v8::base::TimeDelta);
// Makes marking progress when allocating a new LAB.
- bool AdvanceMarkingOnAllocation();
+ void AdvanceMarkingOnAllocation();
// Signals leaving the atomic marking pause. This method expects no more
// objects to be marked and merely updates marking states if needed.
@@ -82,6 +83,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
// Combines:
// - EnterAtomicPause()
// - AdvanceMarkingWithDeadline()
+ // - ProcessWeakness()
// - LeaveAtomicPause()
void FinishMarking(MarkingConfig::StackState);
@@ -93,7 +95,9 @@ class V8_EXPORT_PRIVATE MarkerBase {
HeapBase& heap() { return heap_; }
MarkingWorklists& MarkingWorklistsForTesting() { return marking_worklists_; }
- MarkingState& MarkingStateForTesting() { return mutator_marking_state_; }
+ MutatorMarkingState& MutatorMarkingStateForTesting() {
+ return mutator_marking_state_;
+ }
cppgc::Visitor& VisitorForTesting() { return visitor(); }
void ClearAllWorklistsForTesting();
@@ -118,6 +122,10 @@ class V8_EXPORT_PRIVATE MarkerBase {
void DisableIncrementalMarkingForTesting();
+ void WaitForConcurrentMarkingForTesting();
+
+ void NotifyCompactionCancelled();
+
protected:
static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration =
v8::base::TimeDelta::FromMilliseconds(2);
@@ -162,11 +170,13 @@ class V8_EXPORT_PRIVATE MarkerBase {
IncrementalMarkingTask::Handle incremental_marking_handle_;
MarkingWorklists marking_worklists_;
- MarkingState mutator_marking_state_;
- bool is_marking_started_ = false;
+ MutatorMarkingState mutator_marking_state_;
+ bool is_marking_started_{false};
IncrementalMarkingSchedule schedule_;
+ std::unique_ptr<ConcurrentMarkerBase> concurrent_marker_{nullptr};
+
bool incremental_marking_disabled_for_testing_{false};
friend class MarkerFactory;
@@ -200,12 +210,13 @@ class V8_EXPORT_PRIVATE Marker final : public MarkerBase {
}
private:
- MarkingVisitor marking_visitor_;
+ MutatorMarkingVisitor marking_visitor_;
ConservativeMarkingVisitor conservative_marking_visitor_;
};
void MarkerBase::WriteBarrierForInConstructionObject(HeapObjectHeader& header) {
- mutator_marking_state_.not_fully_constructed_worklist().Push(&header);
+ mutator_marking_state_.not_fully_constructed_worklist()
+ .Push<AccessMode::kAtomic>(&header);
}
void MarkerBase::WriteBarrierForObject(HeapObjectHeader& header) {
diff --git a/deps/v8/src/heap/cppgc/marking-state.cc b/deps/v8/src/heap/cppgc/marking-state.cc
index 0cc160bd0d..1796e67cbe 100644
--- a/deps/v8/src/heap/cppgc/marking-state.cc
+++ b/deps/v8/src/heap/cppgc/marking-state.cc
@@ -4,16 +4,26 @@
#include "src/heap/cppgc/marking-state.h"
+#include <unordered_set>
+
namespace cppgc {
namespace internal {
-void MarkingState::FlushNotFullyConstructedObjects() {
- not_fully_constructed_worklist().Publish();
- if (!not_fully_constructed_worklist_.IsGlobalEmpty()) {
- previously_not_fully_constructed_worklist_.Merge(
- &not_fully_constructed_worklist_);
+void MutatorMarkingState::FlushNotFullyConstructedObjects() {
+ std::unordered_set<HeapObjectHeader*> objects =
+ not_fully_constructed_worklist_.Extract<AccessMode::kAtomic>();
+ for (HeapObjectHeader* object : objects) {
+ if (MarkNoPush(*object))
+ previously_not_fully_constructed_worklist_.Push(object);
+ }
+}
+
+void MutatorMarkingState::FlushDiscoveredEphemeronPairs() {
+ discovered_ephemeron_pairs_worklist_.Publish();
+ if (!discovered_ephemeron_pairs_worklist_.IsGlobalEmpty()) {
+ ephemeron_pairs_for_processing_worklist_.Merge(
+ &discovered_ephemeron_pairs_worklist_);
}
- DCHECK(not_fully_constructed_worklist_.IsGlobalEmpty());
}
} // namespace internal
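
Both Flush helpers above follow a publish-then-merge shape: local segments are published into the global worklist, which is then merged wholesale into the destination worklist. A simplified stand-in model of that shape (an assumption for illustration, much simpler than heap::base::Worklist):

#include <mutex>
#include <utility>
#include <vector>

template <typename T>
class SimpleWorklist {
 public:
  class Local {
   public:
    explicit Local(SimpleWorklist& global) : global_(global) {}
    void Push(T value) { buffer_.push_back(std::move(value)); }
    // Publish: hand the thread-local buffer over to the shared store.
    void Publish() {
      std::lock_guard<std::mutex> lock(global_.mutex_);
      for (auto& v : buffer_) global_.items_.push_back(std::move(v));
      buffer_.clear();
    }

   private:
    SimpleWorklist& global_;
    std::vector<T> buffer_;
  };

  // Merge: move everything from `other` into this worklist in one step.
  void Merge(SimpleWorklist& other) {
    std::scoped_lock lock(mutex_, other.mutex_);
    for (auto& v : other.items_) items_.push_back(std::move(v));
    other.items_.clear();
  }

 private:
  std::mutex mutex_;
  std::vector<T> items_;
};

// FlushDiscoveredEphemeronPairs() follows this shape: publish the local
// segment, then merge the global "discovered" list into "for processing".
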
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index 526633d455..777ee08e35 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -6,6 +6,7 @@
#define V8_HEAP_CPPGC_MARKING_STATE_H_
#include "include/cppgc/trace-trait.h"
+#include "src/heap/cppgc/compaction-worklists.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
@@ -16,52 +17,60 @@ namespace cppgc {
namespace internal {
// C++ marking implementation.
-class MarkingState {
+class MarkingStateBase {
public:
- inline MarkingState(HeapBase& heap, MarkingWorklists&);
+ inline MarkingStateBase(HeapBase& heap, MarkingWorklists&,
+ CompactionWorklists*);
- MarkingState(const MarkingState&) = delete;
- MarkingState& operator=(const MarkingState&) = delete;
+ MarkingStateBase(const MarkingStateBase&) = delete;
+ MarkingStateBase& operator=(const MarkingStateBase&) = delete;
inline void MarkAndPush(const void*, TraceDescriptor);
- inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
inline void MarkAndPush(HeapObjectHeader&);
- inline bool MarkNoPush(HeapObjectHeader&);
-
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
- inline void DynamicallyMarkAddress(ConstAddress);
+ inline void PushMarked(HeapObjectHeader&, TraceDescriptor desc);
inline void RegisterWeakReferenceIfNeeded(const void*, TraceDescriptor,
WeakCallback, const void*);
inline void RegisterWeakCallback(WeakCallback, const void*);
- inline void InvokeWeakRootsCallbackIfNeeded(const void*, TraceDescriptor,
- WeakCallback, const void*);
+
+ void RegisterMovableReference(const void** slot) {
+ if (!movable_slots_worklist_) return;
+ movable_slots_worklist_->Push(slot);
+ }
+
+ // Weak containers are special in that they may require re-tracing if
+ // reachable through stack, even if the container was already traced before.
+ // ProcessWeakContainer records which weak containers were already marked so
+ // that conservative stack scanning knows to retrace them.
+ inline void ProcessWeakContainer(const void*, TraceDescriptor, WeakCallback,
+ const void*);
+
+ inline void ProcessEphemeron(const void*, TraceDescriptor);
inline void AccountMarkedBytes(const HeapObjectHeader&);
+ inline void AccountMarkedBytes(size_t);
size_t marked_bytes() const { return marked_bytes_; }
void Publish() {
marking_worklist_.Publish();
- not_fully_constructed_worklist_.Publish();
previously_not_fully_constructed_worklist_.Publish();
weak_callback_worklist_.Publish();
write_barrier_worklist_.Publish();
+ concurrent_marking_bailout_worklist_.Publish();
+ discovered_ephemeron_pairs_worklist_.Publish();
+ ephemeron_pairs_for_processing_worklist_.Publish();
+ if (IsCompactionEnabled()) movable_slots_worklist_->Publish();
}
- // Moves objects in not_fully_constructed_worklist_ to
- // previously_not_full_constructed_worklists_.
- void FlushNotFullyConstructedObjects();
-
MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
return marking_worklist_;
}
- MarkingWorklists::NotFullyConstructedWorklist::Local&
+ MarkingWorklists::NotFullyConstructedWorklist&
not_fully_constructed_worklist() {
return not_fully_constructed_worklist_;
}
- MarkingWorklists::NotFullyConstructedWorklist::Local&
+ MarkingWorklists::PreviouslyNotFullyConstructedWorklist::Local&
previously_not_fully_constructed_worklist() {
return previously_not_fully_constructed_worklist_;
}
@@ -71,103 +80,269 @@ class MarkingState {
MarkingWorklists::WriteBarrierWorklist::Local& write_barrier_worklist() {
return write_barrier_worklist_;
}
+ MarkingWorklists::ConcurrentMarkingBailoutWorklist::Local&
+ concurrent_marking_bailout_worklist() {
+ return concurrent_marking_bailout_worklist_;
+ }
+ MarkingWorklists::EphemeronPairsWorklist::Local&
+ discovered_ephemeron_pairs_worklist() {
+ return discovered_ephemeron_pairs_worklist_;
+ }
+ MarkingWorklists::EphemeronPairsWorklist::Local&
+ ephemeron_pairs_for_processing_worklist() {
+ return ephemeron_pairs_for_processing_worklist_;
+ }
+ MarkingWorklists::WeakContainersWorklist& weak_containers_worklist() {
+ return weak_containers_worklist_;
+ }
+
+ CompactionWorklists::MovableReferencesWorklist::Local*
+ movable_slots_worklist() {
+ return movable_slots_worklist_.get();
+ }
+
+ void NotifyCompactionCancelled() {
+ DCHECK(IsCompactionEnabled());
+ movable_slots_worklist_->Clear();
+ movable_slots_worklist_.reset();
+ }
+
+ protected:
+ inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
+
+ inline bool MarkNoPush(HeapObjectHeader&);
+
+ inline void RegisterWeakContainer(HeapObjectHeader&);
+
+ inline bool IsCompactionEnabled() const {
+ return movable_slots_worklist_.get();
+ }
- private:
#ifdef DEBUG
HeapBase& heap_;
#endif // DEBUG
MarkingWorklists::MarkingWorklist::Local marking_worklist_;
- MarkingWorklists::NotFullyConstructedWorklist::Local
+ MarkingWorklists::NotFullyConstructedWorklist&
not_fully_constructed_worklist_;
- MarkingWorklists::NotFullyConstructedWorklist::Local
+ MarkingWorklists::PreviouslyNotFullyConstructedWorklist::Local
previously_not_fully_constructed_worklist_;
MarkingWorklists::WeakCallbackWorklist::Local weak_callback_worklist_;
MarkingWorklists::WriteBarrierWorklist::Local write_barrier_worklist_;
+ MarkingWorklists::ConcurrentMarkingBailoutWorklist::Local
+ concurrent_marking_bailout_worklist_;
+ MarkingWorklists::EphemeronPairsWorklist::Local
+ discovered_ephemeron_pairs_worklist_;
+ MarkingWorklists::EphemeronPairsWorklist::Local
+ ephemeron_pairs_for_processing_worklist_;
+ MarkingWorklists::WeakContainersWorklist& weak_containers_worklist_;
+ // Existence of the worklist (|movable_slots_worklist_| != nullptr) denotes
+ // that compaction is currently enabled and slots must be recorded.
+ std::unique_ptr<CompactionWorklists::MovableReferencesWorklist::Local>
+ movable_slots_worklist_;
size_t marked_bytes_ = 0;
};
-MarkingState::MarkingState(HeapBase& heap, MarkingWorklists& marking_worklists)
+MarkingStateBase::MarkingStateBase(HeapBase& heap,
+ MarkingWorklists& marking_worklists,
+ CompactionWorklists* compaction_worklists)
:
#ifdef DEBUG
heap_(heap),
#endif // DEBUG
marking_worklist_(marking_worklists.marking_worklist()),
not_fully_constructed_worklist_(
- marking_worklists.not_fully_constructed_worklist()),
+ *marking_worklists.not_fully_constructed_worklist()),
previously_not_fully_constructed_worklist_(
marking_worklists.previously_not_fully_constructed_worklist()),
weak_callback_worklist_(marking_worklists.weak_callback_worklist()),
- write_barrier_worklist_(marking_worklists.write_barrier_worklist()) {
+ write_barrier_worklist_(marking_worklists.write_barrier_worklist()),
+ concurrent_marking_bailout_worklist_(
+ marking_worklists.concurrent_marking_bailout_worklist()),
+ discovered_ephemeron_pairs_worklist_(
+ marking_worklists.discovered_ephemeron_pairs_worklist()),
+ ephemeron_pairs_for_processing_worklist_(
+ marking_worklists.ephemeron_pairs_for_processing_worklist()),
+ weak_containers_worklist_(*marking_worklists.weak_containers_worklist()) {
+ if (compaction_worklists) {
+ movable_slots_worklist_ =
+ std::make_unique<CompactionWorklists::MovableReferencesWorklist::Local>(
+ compaction_worklists->movable_slots_worklist());
+ }
}
-void MarkingState::MarkAndPush(const void* object, TraceDescriptor desc) {
+void MarkingStateBase::MarkAndPush(const void* object, TraceDescriptor desc) {
DCHECK_NOT_NULL(object);
MarkAndPush(HeapObjectHeader::FromPayload(
const_cast<void*>(desc.base_object_payload)),
desc);
}
-void MarkingState::MarkAndPush(HeapObjectHeader& header, TraceDescriptor desc) {
+void MarkingStateBase::MarkAndPush(HeapObjectHeader& header,
+ TraceDescriptor desc) {
DCHECK_NOT_NULL(desc.callback);
- if (!MarkNoPush(header)) return;
-
- if (header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>()) {
- not_fully_constructed_worklist_.Push(&header);
- } else {
- marking_worklist_.Push(desc);
+ if (header.IsInConstruction<AccessMode::kAtomic>()) {
+ not_fully_constructed_worklist_.Push<AccessMode::kAtomic>(&header);
+ } else if (MarkNoPush(header)) {
+ PushMarked(header, desc);
}
}
-bool MarkingState::MarkNoPush(HeapObjectHeader& header) {
+bool MarkingStateBase::MarkNoPush(HeapObjectHeader& header) {
// A GC should only mark the objects that belong in its heap.
DCHECK_EQ(&heap_, BasePage::FromPayload(&header)->heap());
// Never mark free space objects. This would e.g. hint to marking a promptly
// freed backing store.
- DCHECK(!header.IsFree());
+ DCHECK(!header.IsFree<AccessMode::kAtomic>());
return header.TryMarkAtomic();
}
-template <HeapObjectHeader::AccessMode mode>
-void MarkingState::DynamicallyMarkAddress(ConstAddress address) {
- HeapObjectHeader& header =
- BasePage::FromPayload(address)->ObjectHeaderFromInnerAddress<mode>(
- const_cast<Address>(address));
- DCHECK(!header.IsInConstruction<mode>());
- if (MarkNoPush(header)) {
- marking_worklist_.Push(
- {reinterpret_cast<void*>(header.Payload()),
- GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex<mode>())
- .trace});
- }
-}
-
-void MarkingState::MarkAndPush(HeapObjectHeader& header) {
+void MarkingStateBase::MarkAndPush(HeapObjectHeader& header) {
MarkAndPush(
header,
{header.Payload(),
GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
-void MarkingState::RegisterWeakReferenceIfNeeded(const void* object,
- TraceDescriptor desc,
- WeakCallback weak_callback,
- const void* parameter) {
+void MarkingStateBase::PushMarked(HeapObjectHeader& header,
+ TraceDescriptor desc) {
+ DCHECK(header.IsMarked<AccessMode::kAtomic>());
+ DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
+ DCHECK_NOT_NULL(desc.callback);
+
+ marking_worklist_.Push(desc);
+}
+
+void MarkingStateBase::RegisterWeakReferenceIfNeeded(const void* object,
+ TraceDescriptor desc,
+ WeakCallback weak_callback,
+ const void* parameter) {
// Filter out already marked values. The write barrier for WeakMember
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
if (HeapObjectHeader::FromPayload(desc.base_object_payload)
- .IsMarked<HeapObjectHeader::AccessMode::kAtomic>())
+ .IsMarked<AccessMode::kAtomic>())
return;
RegisterWeakCallback(weak_callback, parameter);
}
-void MarkingState::InvokeWeakRootsCallbackIfNeeded(const void* object,
- TraceDescriptor desc,
- WeakCallback weak_callback,
- const void* parameter) {
+void MarkingStateBase::RegisterWeakCallback(WeakCallback callback,
+ const void* object) {
+ DCHECK_NOT_NULL(callback);
+ weak_callback_worklist_.Push({callback, object});
+}
+
+void MarkingStateBase::RegisterWeakContainer(HeapObjectHeader& header) {
+ weak_containers_worklist_.Push<AccessMode::kAtomic>(&header);
+}
+
+void MarkingStateBase::ProcessWeakContainer(const void* object,
+ TraceDescriptor desc,
+ WeakCallback callback,
+ const void* data) {
+ DCHECK_NOT_NULL(object);
+
+ HeapObjectHeader& header =
+ HeapObjectHeader::FromPayload(const_cast<void*>(object));
+
+ if (header.IsInConstruction<AccessMode::kAtomic>()) {
+ not_fully_constructed_worklist_.Push<AccessMode::kAtomic>(&header);
+ return;
+ }
+
+ // Only mark the container initially. Its buckets will be processed after
+ // marking.
+ if (!MarkNoPush(header)) return;
+ RegisterWeakContainer(header);
+
+ // Register final weak processing of the backing store.
+ RegisterWeakCallback(callback, data);
+
+ // Weak containers might not require tracing. In such cases the callback in
+ // the TraceDescriptor will be nullptr. For ephemerons the callback will be
+ // non-nullptr so that the container is traced and the ephemeron pairs are
+ // processed.
+ if (desc.callback) PushMarked(header, desc);
+}
+
+void MarkingStateBase::ProcessEphemeron(const void* key,
+ TraceDescriptor value_desc) {
+ // Filter out already marked keys. The write barrier for WeakMember
+ // ensures that any newly set value after this point is kept alive and does
+ // not require the callback.
+ if (HeapObjectHeader::FromPayload(key).IsMarked<AccessMode::kAtomic>()) {
+ MarkAndPush(value_desc.base_object_payload, value_desc);
+ return;
+ }
+ discovered_ephemeron_pairs_worklist_.Push({key, value_desc});
+}
+
+void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
+ AccountMarkedBytes(
+ header.IsLargeObject<AccessMode::kAtomic>()
+ ? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
+ ->PayloadSize()
+ : header.GetSize<AccessMode::kAtomic>());
+}
+
+void MarkingStateBase::AccountMarkedBytes(size_t marked_bytes) {
+ marked_bytes_ += marked_bytes;
+}
+
+class MutatorMarkingState : public MarkingStateBase {
+ public:
+ MutatorMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists,
+ CompactionWorklists* compaction_worklists)
+ : MarkingStateBase(heap, marking_worklists, compaction_worklists) {}
+
+ inline bool MarkNoPush(HeapObjectHeader& header) {
+ return MutatorMarkingState::MarkingStateBase::MarkNoPush(header);
+ }
+
+ inline void PushMarkedWeakContainer(HeapObjectHeader&);
+
+ inline void DynamicallyMarkAddress(ConstAddress);
+
+ // Moves objects in not_fully_constructed_worklist_ to
+ // previously_not_fully_constructed_worklist_.
+ void FlushNotFullyConstructedObjects();
+
+ // Moves ephemeron pairs in discovered_ephemeron_pairs_worklist_ to
+ // ephemeron_pairs_for_processing_worklist_.
+ void FlushDiscoveredEphemeronPairs();
+
+ inline void InvokeWeakRootsCallbackIfNeeded(const void*, TraceDescriptor,
+ WeakCallback, const void*);
+
+ inline bool IsMarkedWeakContainer(HeapObjectHeader&);
+};
+
+void MutatorMarkingState::PushMarkedWeakContainer(HeapObjectHeader& header) {
+ DCHECK(weak_containers_worklist_.Contains(&header));
+ weak_containers_worklist_.Erase(&header);
+ PushMarked(
+ header,
+ {header.Payload(),
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
+}
+
+void MutatorMarkingState::DynamicallyMarkAddress(ConstAddress address) {
+ HeapObjectHeader& header =
+ BasePage::FromPayload(address)->ObjectHeaderFromInnerAddress(
+ const_cast<Address>(address));
+ DCHECK(!header.IsInConstruction());
+ if (MarkNoPush(header)) {
+ marking_worklist_.Push(
+ {reinterpret_cast<void*>(header.Payload()),
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
+ }
+}
+
+void MutatorMarkingState::InvokeWeakRootsCallbackIfNeeded(
+ const void* object, TraceDescriptor desc, WeakCallback weak_callback,
+ const void* parameter) {
// Since weak roots are only traced at the end of marking, we can execute
// the callback instead of registering it.
#if DEBUG
@@ -178,17 +353,65 @@ void MarkingState::InvokeWeakRootsCallbackIfNeeded(const void* object,
weak_callback(LivenessBrokerFactory::Create(), parameter);
}
-void MarkingState::RegisterWeakCallback(WeakCallback callback,
- const void* object) {
- weak_callback_worklist_.Push({callback, object});
+bool MutatorMarkingState::IsMarkedWeakContainer(HeapObjectHeader& header) {
+ const bool result = weak_containers_worklist_.Contains(&header);
+ DCHECK_IMPLIES(result, header.IsMarked());
+ return result;
}
-void MarkingState::AccountMarkedBytes(const HeapObjectHeader& header) {
- marked_bytes_ +=
- header.IsLargeObject()
- ? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
- ->PayloadSize()
- : header.GetSize();
+class ConcurrentMarkingState : public MarkingStateBase {
+ public:
+ ConcurrentMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists,
+ CompactionWorklists* compaction_worklists)
+ : MarkingStateBase(heap, marking_worklists, compaction_worklists) {}
+
+ ~ConcurrentMarkingState() { DCHECK_EQ(last_marked_bytes_, marked_bytes_); }
+
+ size_t RecentlyMarkedBytes() {
+ return marked_bytes_ - std::exchange(last_marked_bytes_, marked_bytes_);
+ }
+
+ inline void AccountDeferredMarkedBytes(size_t deferred_bytes) {
+ // AccountDeferredMarkedBytes is called from Trace methods, which are always
+ // called after AccountMarkedBytes, so there should be no underflow here.
+ DCHECK_LE(deferred_bytes, marked_bytes_);
+ marked_bytes_ -= deferred_bytes;
+ }
+
+ private:
+ size_t last_marked_bytes_ = 0;
+};
+
+template <size_t deadline_check_interval, typename WorklistLocal,
+ typename Callback, typename Predicate>
+bool DrainWorklistWithPredicate(Predicate should_yield,
+ WorklistLocal& worklist_local,
+ Callback callback) {
+ if (worklist_local.IsLocalAndGlobalEmpty()) return true;
+ // For concurrent markers, should_yield also reports marked bytes.
+ if (should_yield()) return false;
+ size_t processed_callback_count = deadline_check_interval;
+ typename WorklistLocal::ItemType item;
+ while (worklist_local.Pop(&item)) {
+ callback(item);
+ if (--processed_callback_count == 0) {
+ if (should_yield()) {
+ return false;
+ }
+ processed_callback_count = deadline_check_interval;
+ }
+ }
+ return true;
+}
+
+template <AccessMode mode>
+void DynamicallyTraceMarkedObject(Visitor& visitor,
+ const HeapObjectHeader& header) {
+ DCHECK(!header.IsInConstruction<mode>());
+ DCHECK(header.IsMarked<mode>());
+ const GCInfo& gcinfo =
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex<mode>());
+ gcinfo.trace(&visitor, header.Payload());
}
} // namespace internal
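
The DrainWorklistWithPredicate helper above pops items off a thread-local worklist and re-checks the yield predicate only once every deadline_check_interval callbacks, so concurrent markers can honor deadlines without paying for a check on every item. Below is a minimal standalone sketch of the same drain-with-periodic-yield loop, using a std::deque in place of cppgc's worklist types; every name in it is illustrative rather than part of cppgc.

#include <cstddef>
#include <deque>

// Drains |worklist|, invoking |callback| per item. |should_yield| is
// re-checked only every kDeadlineCheckInterval items to keep the hot loop
// cheap. Returns true if the worklist was fully drained, false on yield.
template <std::size_t kDeadlineCheckInterval, typename T, typename Callback,
          typename Predicate>
bool DrainWithPredicate(Predicate should_yield, std::deque<T>& worklist,
                        Callback callback) {
  if (worklist.empty()) return true;
  if (should_yield()) return false;
  std::size_t until_deadline_check = kDeadlineCheckInterval;
  while (!worklist.empty()) {
    T item = worklist.front();
    worklist.pop_front();
    callback(item);
    if (--until_deadline_check == 0) {
      if (should_yield()) return false;
      until_deadline_check = kDeadlineCheckInterval;
    }
  }
  return true;
}

A caller would instantiate it as, e.g., DrainWithPredicate<64>(should_yield, items, process_item); the interval here is a tuning knob for the sketch, not a value taken from cppgc.
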
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index 4238709ae1..009228a8ff 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -6,62 +6,63 @@
#include "src/base/logging.h"
#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/marking-visitor.h"
namespace cppgc {
namespace internal {
-MarkingVerifier::MarkingVerifier(HeapBase& heap,
- Heap::Config::StackState stack_state)
- : cppgc::Visitor(VisitorFactory::CreateKey()),
- ConservativeTracingVisitor(heap, *heap.page_backend(), *this) {
- Traverse(&heap.raw_heap());
+MarkingVerifierBase::MarkingVerifierBase(
+ HeapBase& heap, std::unique_ptr<cppgc::Visitor> visitor)
+ : ConservativeTracingVisitor(heap, *heap.page_backend(), *visitor.get()),
+ visitor_(std::move(visitor)) {}
+
+void MarkingVerifierBase::Run(Heap::Config::StackState stack_state) {
+ Traverse(&heap_.raw_heap());
if (stack_state == Heap::Config::StackState::kMayContainHeapPointers) {
in_construction_objects_ = &in_construction_objects_stack_;
- heap.stack()->IteratePointers(this);
+ heap_.stack()->IteratePointers(this);
CHECK_EQ(in_construction_objects_stack_, in_construction_objects_heap_);
}
}
-void MarkingVerifier::Visit(const void* object, TraceDescriptor desc) {
- VerifyChild(desc.base_object_payload);
-}
-
-void MarkingVerifier::VisitWeak(const void* object, TraceDescriptor desc,
- WeakCallback, const void*) {
- // Weak objects should have been cleared at this point. As a consequence, all
- // objects found through weak references have to point to live objects at this
- // point.
- VerifyChild(desc.base_object_payload);
-}
-
-void MarkingVerifier::VerifyChild(const void* base_object_payload) {
+void VerificationState::VerifyMarked(const void* base_object_payload) const {
const HeapObjectHeader& child_header =
HeapObjectHeader::FromPayload(base_object_payload);
- CHECK(child_header.IsMarked());
+ if (!child_header.IsMarked()) {
+ FATAL(
+ "MarkingVerifier: Encountered unmarked object.\n"
+ "#\n"
+ "# Hint:\n"
+ "# %s\n"
+ "# \\-> %s",
+ parent_->GetName().value, child_header.GetName().value);
+ }
}
-void MarkingVerifier::VisitConservatively(
+void MarkingVerifierBase::VisitInConstructionConservatively(
HeapObjectHeader& header, TraceConservativelyCallback callback) {
CHECK(header.IsMarked());
in_construction_objects_->insert(&header);
callback(this, header);
}
-void MarkingVerifier::VisitPointer(const void* address) {
+void MarkingVerifierBase::VisitPointer(const void* address) {
TraceConservativelyIfNeeded(address);
}
-bool MarkingVerifier::VisitHeapObjectHeader(HeapObjectHeader* header) {
+bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader* header) {
// Verify only non-free marked objects.
if (!header->IsMarked()) return true;
DCHECK(!header->IsFree());
+ SetCurrentParent(header);
+
if (!header->IsInConstruction()) {
- GlobalGCInfoTable::GCInfoFromIndex(header->GetGCInfoIndex())
- .trace(this, header->Payload());
+ header->Trace(visitor_.get());
} else {
// Dispatches to conservative tracing implementation.
TraceConservativelyIfNeeded(*header);
@@ -70,5 +71,50 @@ bool MarkingVerifier::VisitHeapObjectHeader(HeapObjectHeader* header) {
return true;
}
+namespace {
+
+class VerificationVisitor final : public cppgc::Visitor {
+ public:
+ explicit VerificationVisitor(VerificationState& state)
+ : cppgc::Visitor(VisitorFactory::CreateKey()), state_(state) {}
+
+ void Visit(const void*, TraceDescriptor desc) final {
+ state_.VerifyMarked(desc.base_object_payload);
+ }
+
+ void VisitWeak(const void*, TraceDescriptor desc, WeakCallback,
+ const void*) final {
+ // Weak objects should have been cleared at this point. As a consequence,
+ // all objects found through weak references have to point to live objects
+ // at this point.
+ state_.VerifyMarked(desc.base_object_payload);
+ }
+
+ void VisitWeakContainer(const void* object, TraceDescriptor,
+ TraceDescriptor weak_desc, WeakCallback,
+ const void*) {
+ if (!object) return;
+
+ // Contents of weak containers are themselves found through page iteration
+ // and are treated strongly there, just as they are when
+ // found through stack scanning. The verification here only makes sure that
+ // the container itself is properly marked.
+ state_.VerifyMarked(weak_desc.base_object_payload);
+ }
+
+ private:
+ VerificationState& state_;
+};
+
+} // namespace
+
+MarkingVerifier::MarkingVerifier(HeapBase& heap_base)
+ : MarkingVerifierBase(heap_base,
+ std::make_unique<VerificationVisitor>(state_)) {}
+
+void MarkingVerifier::SetCurrentParent(const HeapObjectHeader* parent) {
+ state_.SetCurrentParent(parent);
+}
+
} // namespace internal
} // namespace cppgc
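
The split into MarkingVerifierBase plus a verification visitor routes every child visit through VerificationState::VerifyMarked, which reports a failing edge as parent \-> child using the object names. A stripped-down sketch of that diagnostic shape, with plain strings standing in for HeapObjectHeader names (all types and names below are illustrative):

#include <cstdio>
#include <cstdlib>
#include <string>

class VerificationStateSketch {
 public:
  // The traversal updates the parent before tracing its outgoing edges.
  void SetCurrentParent(const std::string* parent) { parent_ = parent; }

  // Aborts with a parent -> child hint when a reachable child is unmarked,
  // mirroring the FATAL message format used by the marking verifier.
  void VerifyMarked(bool child_is_marked,
                    const std::string& child_name) const {
    if (child_is_marked) return;
    std::fprintf(stderr,
                 "MarkingVerifier: Encountered unmarked object.\n"
                 "#\n"
                 "# Hint:\n"
                 "# %s\n"
                 "# \\-> %s\n",
                 parent_ ? parent_->c_str() : "<unknown parent>",
                 child_name.c_str());
    std::abort();
  }

 private:
  const std::string* parent_ = nullptr;
};
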
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.h b/deps/v8/src/heap/cppgc/marking-verifier.h
index 45661bd465..eeced68449 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.h
+++ b/deps/v8/src/heap/cppgc/marking-verifier.h
@@ -8,6 +8,7 @@
#include <unordered_set>
#include "src/heap/base/stack.h"
+#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/visitor.h"
@@ -15,34 +16,60 @@
namespace cppgc {
namespace internal {
-class V8_EXPORT_PRIVATE MarkingVerifier final
- : private HeapVisitor<MarkingVerifier>,
- public cppgc::Visitor,
+class VerificationState {
+ public:
+ void VerifyMarked(const void*) const;
+ void SetCurrentParent(const HeapObjectHeader* header) { parent_ = header; }
+
+ private:
+ const HeapObjectHeader* parent_ = nullptr;
+};
+
+class V8_EXPORT_PRIVATE MarkingVerifierBase
+ : private HeapVisitor<MarkingVerifierBase>,
public ConservativeTracingVisitor,
public heap::base::StackVisitor {
- friend class HeapVisitor<MarkingVerifier>;
+ friend class HeapVisitor<MarkingVerifierBase>;
public:
- explicit MarkingVerifier(HeapBase&, Heap::Config::StackState);
+ ~MarkingVerifierBase() override = default;
- void Visit(const void*, TraceDescriptor) final;
- void VisitWeak(const void*, TraceDescriptor, WeakCallback, const void*) final;
+ MarkingVerifierBase(const MarkingVerifierBase&) = delete;
+ MarkingVerifierBase& operator=(const MarkingVerifierBase&) = delete;
- private:
- void VerifyChild(const void*);
+ void Run(Heap::Config::StackState);
+
+ protected:
+ MarkingVerifierBase(HeapBase&, std::unique_ptr<cppgc::Visitor>);
+
+ virtual void SetCurrentParent(const HeapObjectHeader*) = 0;
- void VisitConservatively(HeapObjectHeader&,
- TraceConservativelyCallback) final;
+ private:
+ void VisitInConstructionConservatively(HeapObjectHeader&,
+ TraceConservativelyCallback) final;
void VisitPointer(const void*) final;
bool VisitHeapObjectHeader(HeapObjectHeader*);
+ std::unique_ptr<cppgc::Visitor> visitor_;
+
std::unordered_set<const HeapObjectHeader*> in_construction_objects_heap_;
std::unordered_set<const HeapObjectHeader*> in_construction_objects_stack_;
std::unordered_set<const HeapObjectHeader*>* in_construction_objects_ =
&in_construction_objects_heap_;
};
+class V8_EXPORT_PRIVATE MarkingVerifier final : public MarkingVerifierBase {
+ public:
+ explicit MarkingVerifier(HeapBase&);
+ ~MarkingVerifier() final = default;
+
+ void SetCurrentParent(const HeapObjectHeader*) final;
+
+ private:
+ VerificationState state_;
+};
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.cc b/deps/v8/src/heap/cppgc/marking-visitor.cc
index b08379eb7e..896b12fc6c 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc/marking-visitor.cc
@@ -10,51 +10,100 @@
namespace cppgc {
namespace internal {
-MarkingVisitor::MarkingVisitor(HeapBase& heap, MarkingState& marking_state)
+MarkingVisitorBase::MarkingVisitorBase(HeapBase& heap,
+ MarkingStateBase& marking_state)
: marking_state_(marking_state) {}
-void MarkingVisitor::Visit(const void* object, TraceDescriptor desc) {
+void MarkingVisitorBase::Visit(const void* object, TraceDescriptor desc) {
marking_state_.MarkAndPush(object, desc);
}
-void MarkingVisitor::VisitWeak(const void* object, TraceDescriptor desc,
- WeakCallback weak_callback,
- const void* weak_member) {
+void MarkingVisitorBase::VisitWeak(const void* object, TraceDescriptor desc,
+ WeakCallback weak_callback,
+ const void* weak_member) {
marking_state_.RegisterWeakReferenceIfNeeded(object, desc, weak_callback,
weak_member);
}
-void MarkingVisitor::VisitRoot(const void* object, TraceDescriptor desc) {
- Visit(object, desc);
+void MarkingVisitorBase::VisitEphemeron(const void* key,
+ TraceDescriptor value_desc) {
+ marking_state_.ProcessEphemeron(key, value_desc);
}
-void MarkingVisitor::VisitWeakRoot(const void* object, TraceDescriptor desc,
- WeakCallback weak_callback,
- const void* weak_root) {
- marking_state_.InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback,
- weak_root);
+void MarkingVisitorBase::VisitWeakContainer(const void* object,
+ TraceDescriptor strong_desc,
+ TraceDescriptor weak_desc,
+ WeakCallback callback,
+ const void* data) {
+ marking_state_.ProcessWeakContainer(object, weak_desc, callback, data);
}
-void MarkingVisitor::RegisterWeakCallback(WeakCallback callback,
- const void* object) {
+void MarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
+ const void* object) {
marking_state_.RegisterWeakCallback(callback, object);
}
+void MarkingVisitorBase::HandleMovableReference(const void** slot) {
+ marking_state_.RegisterMovableReference(slot);
+}
+
ConservativeMarkingVisitor::ConservativeMarkingVisitor(
- HeapBase& heap, MarkingState& marking_state, cppgc::Visitor& visitor)
+ HeapBase& heap, MutatorMarkingState& marking_state, cppgc::Visitor& visitor)
: ConservativeTracingVisitor(heap, *heap.page_backend(), visitor),
marking_state_(marking_state) {}
-void ConservativeMarkingVisitor::VisitConservatively(
+void ConservativeMarkingVisitor::VisitFullyConstructedConservatively(
+ HeapObjectHeader& header) {
+ if (header.IsMarked()) {
+ if (marking_state_.IsMarkedWeakContainer(header))
+ marking_state_.PushMarkedWeakContainer(header);
+ return;
+ }
+ ConservativeTracingVisitor::VisitFullyConstructedConservatively(header);
+}
+
+void ConservativeMarkingVisitor::VisitInConstructionConservatively(
HeapObjectHeader& header, TraceConservativelyCallback callback) {
+ DCHECK(!marking_state_.IsMarkedWeakContainer(header));
marking_state_.MarkNoPush(header);
- callback(this, header);
marking_state_.AccountMarkedBytes(header);
+ callback(this, header);
}
+MutatorMarkingVisitor::MutatorMarkingVisitor(HeapBase& heap,
+ MutatorMarkingState& marking_state)
+ : MarkingVisitorBase(heap, marking_state) {}
+
+void MutatorMarkingVisitor::VisitRoot(const void* object, TraceDescriptor desc,
+ const SourceLocation&) {
+ Visit(object, desc);
+}
+
+void MutatorMarkingVisitor::VisitWeakRoot(const void* object,
+ TraceDescriptor desc,
+ WeakCallback weak_callback,
+ const void* weak_root,
+ const SourceLocation&) {
+ static_cast<MutatorMarkingState&>(marking_state_)
+ .InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback, weak_root);
+}
+
+ConcurrentMarkingVisitor::ConcurrentMarkingVisitor(
+ HeapBase& heap, ConcurrentMarkingState& marking_state)
+ : MarkingVisitorBase(heap, marking_state) {}
+
void ConservativeMarkingVisitor::VisitPointer(const void* address) {
TraceConservativelyIfNeeded(address);
}
+bool ConcurrentMarkingVisitor::DeferTraceToMutatorThreadIfConcurrent(
+ const void* parameter, TraceCallback callback, size_t deferred_size) {
+ marking_state_.concurrent_marking_bailout_worklist().Push(
+ {parameter, callback, deferred_size});
+ static_cast<ConcurrentMarkingState&>(marking_state_)
+ .AccountDeferredMarkedBytes(deferred_size);
+ return true;
+}
+
} // namespace internal
} // namespace cppgc
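
DeferTraceToMutatorThreadIfConcurrent above records a {parameter, callback, size} triple on the bailout worklist and subtracts the deferred bytes from the concurrent marker's tally, so those bytes are only accounted once the mutator actually traces the object. A self-contained sketch of that defer-and-reaccount bookkeeping; the class and functions below are illustrative, not cppgc's API.

#include <cstddef>
#include <deque>

// An item the concurrent marker could not trace itself; it is replayed on the
// mutator thread later.
struct BailoutItem {
  const void* parameter;
  void (*callback)(const void* parameter);
  std::size_t deferred_size;
};

class ConcurrentMarkerSketch {
 public:
  void AccountMarkedBytes(std::size_t bytes) { marked_bytes_ += bytes; }

  // Concurrent side: record the item and take its bytes back out of the
  // concurrently marked total. Assumes AccountMarkedBytes already ran for the
  // object, so the subtraction cannot underflow.
  void DeferToMutator(const void* parameter, void (*callback)(const void*),
                      std::size_t size) {
    bailout_.push_back({parameter, callback, size});
    marked_bytes_ -= size;
  }

  // Mutator side: replay deferred items and return how many bytes the mutator
  // should now account for itself.
  std::size_t DrainBailoutOnMutator() {
    std::size_t replayed_bytes = 0;
    while (!bailout_.empty()) {
      BailoutItem item = bailout_.front();
      bailout_.pop_front();
      item.callback(item.parameter);
      replayed_bytes += item.deferred_size;
    }
    return replayed_bytes;
  }

  std::size_t marked_bytes() const { return marked_bytes_; }

 private:
  std::deque<BailoutItem> bailout_;
  std::size_t marked_bytes_ = 0;
};
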
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.h b/deps/v8/src/heap/cppgc/marking-visitor.h
index 408fa2514c..91cca87dd9 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.h
+++ b/deps/v8/src/heap/cppgc/marking-visitor.h
@@ -16,36 +16,71 @@ namespace internal {
class HeapBase;
class HeapObjectHeader;
class Marker;
-class MarkingState;
+class MarkingStateBase;
+class MutatorMarkingState;
+class ConcurrentMarkingState;
-class V8_EXPORT_PRIVATE MarkingVisitor : public VisitorBase {
+class V8_EXPORT_PRIVATE MarkingVisitorBase : public VisitorBase {
public:
- MarkingVisitor(HeapBase&, MarkingState&);
- ~MarkingVisitor() override = default;
+ MarkingVisitorBase(HeapBase&, MarkingStateBase&);
+ ~MarkingVisitorBase() override = default;
protected:
void Visit(const void*, TraceDescriptor) final;
void VisitWeak(const void*, TraceDescriptor, WeakCallback, const void*) final;
- void VisitRoot(const void*, TraceDescriptor) final;
- void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
- const void*) final;
+ void VisitEphemeron(const void*, TraceDescriptor) final;
+ void VisitWeakContainer(const void* self, TraceDescriptor strong_desc,
+ TraceDescriptor weak_desc, WeakCallback callback,
+ const void* data) final;
void RegisterWeakCallback(WeakCallback, const void*) final;
+ void HandleMovableReference(const void**) final;
- MarkingState& marking_state_;
+ MarkingStateBase& marking_state_;
+};
+
+class V8_EXPORT_PRIVATE MutatorMarkingVisitor : public MarkingVisitorBase {
+ public:
+ MutatorMarkingVisitor(HeapBase&, MutatorMarkingState&);
+ ~MutatorMarkingVisitor() override = default;
+
+ protected:
+ void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) final;
+ void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback, const void*,
+ const SourceLocation&) final;
+};
+
+class V8_EXPORT_PRIVATE ConcurrentMarkingVisitor final
+ : public MarkingVisitorBase {
+ public:
+ ConcurrentMarkingVisitor(HeapBase&, ConcurrentMarkingState&);
+ ~ConcurrentMarkingVisitor() override = default;
+
+ protected:
+ void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) final {
+ UNREACHABLE();
+ }
+ void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback, const void*,
+ const SourceLocation&) final {
+ UNREACHABLE();
+ }
+
+ bool DeferTraceToMutatorThreadIfConcurrent(const void*, TraceCallback,
+ size_t) final;
};
class ConservativeMarkingVisitor : public ConservativeTracingVisitor,
public heap::base::StackVisitor {
public:
- ConservativeMarkingVisitor(HeapBase&, MarkingState&, cppgc::Visitor&);
+ ConservativeMarkingVisitor(HeapBase&, MutatorMarkingState&, cppgc::Visitor&);
~ConservativeMarkingVisitor() override = default;
private:
- void VisitConservatively(HeapObjectHeader&,
- TraceConservativelyCallback) final;
+ void VisitFullyConstructedConservatively(HeapObjectHeader&) final;
+ void VisitInConstructionConservatively(HeapObjectHeader&,
+ TraceConservativelyCallback) final;
void VisitPointer(const void*) final;
- MarkingState& marking_state_;
+ MutatorMarkingState& marking_state_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/marking-worklists.cc b/deps/v8/src/heap/cppgc/marking-worklists.cc
index 15d78fd4cf..993b5e069d 100644
--- a/deps/v8/src/heap/cppgc/marking-worklists.cc
+++ b/deps/v8/src/heap/cppgc/marking-worklists.cc
@@ -16,6 +16,13 @@ void MarkingWorklists::ClearForTesting() {
previously_not_fully_constructed_worklist_.Clear();
write_barrier_worklist_.Clear();
weak_callback_worklist_.Clear();
+ concurrent_marking_bailout_worklist_.Clear();
+ discovered_ephemeron_pairs_worklist_.Clear();
+ ephemeron_pairs_for_processing_worklist_.Clear();
+}
+
+MarkingWorklists::ExternalMarkingWorklist::~ExternalMarkingWorklist() {
+ DCHECK(IsEmpty());
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/marking-worklists.h b/deps/v8/src/heap/cppgc/marking-worklists.h
index 96d11eef53..c769a14fb7 100644
--- a/deps/v8/src/heap/cppgc/marking-worklists.h
+++ b/deps/v8/src/heap/cppgc/marking-worklists.h
@@ -5,40 +5,93 @@
#ifndef V8_HEAP_CPPGC_MARKING_WORKLISTS_H_
#define V8_HEAP_CPPGC_MARKING_WORKLISTS_H_
+#include <unordered_set>
+
#include "include/cppgc/visitor.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/base/worklist.h"
+#include "src/heap/cppgc/heap-object-header.h"
namespace cppgc {
namespace internal {
-class HeapObjectHeader;
-
class MarkingWorklists {
+ private:
+ class V8_EXPORT_PRIVATE ExternalMarkingWorklist {
+ public:
+ template <AccessMode = AccessMode::kNonAtomic>
+ void Push(HeapObjectHeader*);
+ template <AccessMode = AccessMode::kNonAtomic>
+ void Erase(HeapObjectHeader*);
+ template <AccessMode = AccessMode::kNonAtomic>
+ bool Contains(HeapObjectHeader*);
+ template <AccessMode = AccessMode::kNonAtomic>
+ std::unordered_set<HeapObjectHeader*> Extract();
+ template <AccessMode = AccessMode::kNonAtomic>
+ void Clear();
+ template <AccessMode = AccessMode::kNonAtomic>
+ bool IsEmpty();
+
+ ~ExternalMarkingWorklist();
+
+ private:
+ template <AccessMode>
+ struct ConditionalMutexGuard;
+
+ void* operator new(size_t) = delete;
+ void* operator new[](size_t) = delete;
+ void operator delete(void*) = delete;
+ void operator delete[](void*) = delete;
+
+ v8::base::Mutex lock_;
+ std::unordered_set<HeapObjectHeader*> objects_;
+ };
+
public:
static constexpr int kMutatorThreadId = 0;
using MarkingItem = cppgc::TraceDescriptor;
+
struct WeakCallbackItem {
cppgc::WeakCallback callback;
const void* parameter;
};
+ struct ConcurrentMarkingBailoutItem {
+ const void* parameter;
+ TraceCallback callback;
+ size_t bailedout_size;
+ };
+
+ struct EphemeronPairItem {
+ const void* key;
+ TraceDescriptor value_desc;
+ };
+
// Segment size of 512 entries necessary to avoid throughput regressions.
// Since the work list is currently a temporary object this is not a problem.
using MarkingWorklist =
heap::base::Worklist<MarkingItem, 512 /* local entries */>;
- using NotFullyConstructedWorklist =
+ using NotFullyConstructedWorklist = ExternalMarkingWorklist;
+ using PreviouslyNotFullyConstructedWorklist =
heap::base::Worklist<HeapObjectHeader*, 16 /* local entries */>;
using WeakCallbackWorklist =
heap::base::Worklist<WeakCallbackItem, 64 /* local entries */>;
using WriteBarrierWorklist =
heap::base::Worklist<HeapObjectHeader*, 64 /*local entries */>;
+ using ConcurrentMarkingBailoutWorklist =
+ heap::base::Worklist<ConcurrentMarkingBailoutItem,
+ 64 /* local entries */>;
+ using EphemeronPairsWorklist =
+ heap::base::Worklist<EphemeronPairItem, 64 /* local entries */>;
+ using WeakContainersWorklist = ExternalMarkingWorklist;
MarkingWorklist* marking_worklist() { return &marking_worklist_; }
NotFullyConstructedWorklist* not_fully_constructed_worklist() {
return &not_fully_constructed_worklist_;
}
- NotFullyConstructedWorklist* previously_not_fully_constructed_worklist() {
+ PreviouslyNotFullyConstructedWorklist*
+ previously_not_fully_constructed_worklist() {
return &previously_not_fully_constructed_worklist_;
}
WriteBarrierWorklist* write_barrier_worklist() {
@@ -47,17 +100,93 @@ class MarkingWorklists {
WeakCallbackWorklist* weak_callback_worklist() {
return &weak_callback_worklist_;
}
+ ConcurrentMarkingBailoutWorklist* concurrent_marking_bailout_worklist() {
+ return &concurrent_marking_bailout_worklist_;
+ }
+ EphemeronPairsWorklist* discovered_ephemeron_pairs_worklist() {
+ return &discovered_ephemeron_pairs_worklist_;
+ }
+ EphemeronPairsWorklist* ephemeron_pairs_for_processing_worklist() {
+ return &ephemeron_pairs_for_processing_worklist_;
+ }
+ WeakContainersWorklist* weak_containers_worklist() {
+ return &weak_containers_worklist_;
+ }
void ClearForTesting();
private:
MarkingWorklist marking_worklist_;
NotFullyConstructedWorklist not_fully_constructed_worklist_;
- NotFullyConstructedWorklist previously_not_fully_constructed_worklist_;
+ PreviouslyNotFullyConstructedWorklist
+ previously_not_fully_constructed_worklist_;
WriteBarrierWorklist write_barrier_worklist_;
WeakCallbackWorklist weak_callback_worklist_;
+ ConcurrentMarkingBailoutWorklist concurrent_marking_bailout_worklist_;
+ EphemeronPairsWorklist discovered_ephemeron_pairs_worklist_;
+ EphemeronPairsWorklist ephemeron_pairs_for_processing_worklist_;
+ WeakContainersWorklist weak_containers_worklist_;
+};
+
+template <>
+struct MarkingWorklists::ExternalMarkingWorklist::ConditionalMutexGuard<
+ AccessMode::kNonAtomic> {
+ explicit ConditionalMutexGuard(v8::base::Mutex*) {}
};
+template <>
+struct MarkingWorklists::ExternalMarkingWorklist::ConditionalMutexGuard<
+ AccessMode::kAtomic> {
+ explicit ConditionalMutexGuard(v8::base::Mutex* lock) : guard_(lock) {}
+
+ private:
+ v8::base::MutexGuard guard_;
+};
+
+template <AccessMode mode>
+void MarkingWorklists::ExternalMarkingWorklist::Push(HeapObjectHeader* object) {
+ DCHECK_NOT_NULL(object);
+ ConditionalMutexGuard<mode> guard(&lock_);
+ objects_.insert(object);
+}
+
+template <AccessMode mode>
+void MarkingWorklists::ExternalMarkingWorklist::Erase(
+ HeapObjectHeader* object) {
+ DCHECK_NOT_NULL(object);
+ ConditionalMutexGuard<mode> guard(&lock_);
+ objects_.erase(object);
+}
+
+template <AccessMode mode>
+bool MarkingWorklists::ExternalMarkingWorklist::Contains(
+ HeapObjectHeader* object) {
+ ConditionalMutexGuard<mode> guard(&lock_);
+ return objects_.find(object) != objects_.end();
+}
+
+template <AccessMode mode>
+std::unordered_set<HeapObjectHeader*>
+MarkingWorklists::ExternalMarkingWorklist::Extract() {
+ ConditionalMutexGuard<mode> guard(&lock_);
+ std::unordered_set<HeapObjectHeader*> extracted;
+ std::swap(extracted, objects_);
+ DCHECK(objects_.empty());
+ return extracted;
+}
+
+template <AccessMode mode>
+void MarkingWorklists::ExternalMarkingWorklist::Clear() {
+ ConditionalMutexGuard<mode> guard(&lock_);
+ objects_.clear();
+}
+
+template <AccessMode mode>
+bool MarkingWorklists::ExternalMarkingWorklist::IsEmpty() {
+ ConditionalMutexGuard<mode> guard(&lock_);
+ return objects_.empty();
+}
+
} // namespace internal
} // namespace cppgc
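
ExternalMarkingWorklist above selects between locked and lock-free bodies at compile time by specializing ConditionalMutexGuard on the AccessMode template parameter. A self-contained sketch of that specialization trick built on std::mutex; the enum and class names mirror the pattern but are not cppgc's types.

#include <mutex>
#include <unordered_set>

enum class AccessMode { kNonAtomic, kAtomic };

template <AccessMode>
struct ConditionalMutexGuard;

// Non-atomic access: the guard is an empty object and the lock is never taken.
template <>
struct ConditionalMutexGuard<AccessMode::kNonAtomic> {
  explicit ConditionalMutexGuard(std::mutex*) {}
};

// Atomic access: the guard holds the mutex for the enclosing scope.
template <>
struct ConditionalMutexGuard<AccessMode::kAtomic> {
  explicit ConditionalMutexGuard(std::mutex* mutex) : guard_(*mutex) {}

 private:
  std::lock_guard<std::mutex> guard_;
};

class ExternalSetSketch {
 public:
  template <AccessMode mode = AccessMode::kNonAtomic>
  void Push(void* object) {
    ConditionalMutexGuard<mode> guard(&lock_);
    objects_.insert(object);
  }

  template <AccessMode mode = AccessMode::kNonAtomic>
  bool Contains(void* object) {
    ConditionalMutexGuard<mode> guard(&lock_);
    return objects_.count(object) != 0;
  }

 private:
  std::mutex lock_;
  std::unordered_set<void*> objects_;
};

Mutator-thread callers use the default kNonAtomic mode; concurrent callers opt into locking with set.Push<AccessMode::kAtomic>(ptr).
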
diff --git a/deps/v8/src/heap/cppgc/name-trait.cc b/deps/v8/src/heap/cppgc/name-trait.cc
new file mode 100644
index 0000000000..d42f5229b8
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/name-trait.cc
@@ -0,0 +1,41 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/name-trait.h"
+
+#include <stdio.h>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+
+// static
+constexpr const char NameProvider::kHiddenName[];
+
+// static
+constexpr const char NameProvider::kNoNameDeducible[];
+
+namespace internal {
+
+// static
+HeapObjectName NameTraitBase::GetNameFromTypeSignature(const char* signature) {
+ // Parsing string of structure:
+ // static HeapObjectName NameTrait<int>::GetNameFor(...) [T = int]
+ if (!signature) return {NameProvider::kNoNameDeducible, true};
+
+ const std::string raw(signature);
+ const auto start_pos = raw.rfind("T = ") + 4;
+ DCHECK_NE(std::string::npos, start_pos);
+ const auto len = raw.length() - start_pos - 1;
+ const std::string name = raw.substr(start_pos, len).c_str();
+ char* name_buffer = new char[name.length() + 1];
+ int written = snprintf(name_buffer, name.length() + 1, "%s", name.c_str());
+ DCHECK_EQ(static_cast<size_t>(written), name.length());
+ USE(written);
+ return {name_buffer, false};
+}
+
+} // namespace internal
+} // namespace cppgc
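
GetNameFromTypeSignature above recovers the template argument from a compiler-generated signature of the form "... [T = int]" and copies it into a buffer that outlives the call. A small standalone sketch of just the parsing step, using std::string throughout; the example signature is made up.

#include <cassert>
#include <cstddef>
#include <iostream>
#include <string>

// Extracts the substring between "T = " and the trailing ']' of a
// __PRETTY_FUNCTION__-style signature, e.g.
//   "static HeapObjectName NameTrait<int>::GetNameFor(...) [T = int]" -> "int".
std::string TypeNameFromSignature(const std::string& signature) {
  const std::size_t start = signature.rfind("T = ");
  assert(start != std::string::npos);
  const std::size_t name_begin = start + 4;
  // Drop the closing ']' that terminates the template-argument dump.
  return signature.substr(name_begin, signature.length() - name_begin - 1);
}

int main() {
  std::cout << TypeNameFromSignature(
                   "static HeapObjectName NameTrait<int>::GetNameFor(...) "
                   "[T = int]")
            << "\n";  // Prints "int".
}
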
diff --git a/deps/v8/src/heap/cppgc/object-allocator.h b/deps/v8/src/heap/cppgc/object-allocator.h
index 85a8d29db2..5c857d2478 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.h
+++ b/deps/v8/src/heap/cppgc/object-allocator.h
@@ -118,12 +118,20 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace* space,
}
void* raw = current_lab.Allocate(size);
- SET_MEMORY_ACCESIBLE(raw, size);
+#if !defined(V8_USE_MEMORY_SANITIZER) && !defined(V8_USE_ADDRESS_SANITIZER) && \
+ DEBUG
+ // For debug builds, unzap only the payload.
+ SET_MEMORY_ACCESSIBLE(static_cast<char*>(raw) + sizeof(HeapObjectHeader),
+ size - sizeof(HeapObjectHeader));
+#else
+ SET_MEMORY_ACCESSIBLE(raw, size);
+#endif
auto* header = new (raw) HeapObjectHeader(size, gcinfo);
+ // The marker needs to find the object start concurrently.
NormalPage::From(BasePage::FromPayload(header))
->object_start_bitmap()
- .SetBit(reinterpret_cast<ConstAddress>(header));
+ .SetBit<AccessMode::kAtomic>(reinterpret_cast<ConstAddress>(header));
return header->Payload();
}
diff --git a/deps/v8/src/heap/cppgc/object-start-bitmap.h b/deps/v8/src/heap/cppgc/object-start-bitmap.h
index 2af6493939..38ba5ca886 100644
--- a/deps/v8/src/heap/cppgc/object-start-bitmap.h
+++ b/deps/v8/src/heap/cppgc/object-start-bitmap.h
@@ -44,19 +44,15 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
// Finds an object header based on a
// address_maybe_pointing_to_the_middle_of_object. Will search for an object
// start in decreasing address order.
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline HeapObjectHeader* FindHeader(
ConstAddress address_maybe_pointing_to_the_middle_of_object) const;
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline void SetBit(ConstAddress);
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline void ClearBit(ConstAddress);
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline bool CheckBit(ConstAddress) const;
// Iterates all object starts recorded in the bitmap.
@@ -71,11 +67,9 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
inline void Clear();
private:
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline void store(size_t cell_index, uint8_t value);
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline uint8_t load(size_t cell_index) const;
static constexpr size_t kBitsPerCell = sizeof(uint8_t) * CHAR_BIT;
@@ -88,7 +82,7 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
inline void ObjectStartIndexAndBit(ConstAddress, size_t*, size_t*) const;
- Address offset_;
+ const Address offset_;
// The bitmap contains a bit for every kGranularity aligned address on a
// a NormalPage, i.e., for a page of size kBlinkPageSize.
std::array<uint8_t, kReservedForBitmap> object_start_bit_map_;
@@ -98,7 +92,7 @@ ObjectStartBitmap::ObjectStartBitmap(Address offset) : offset_(offset) {
Clear();
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
HeapObjectHeader* ObjectStartBitmap::FindHeader(
ConstAddress address_maybe_pointing_to_the_middle_of_object) const {
DCHECK_LE(offset_, address_maybe_pointing_to_the_middle_of_object);
@@ -120,7 +114,7 @@ HeapObjectHeader* ObjectStartBitmap::FindHeader(
return reinterpret_cast<HeapObjectHeader*>(object_offset + offset_);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
void ObjectStartBitmap::SetBit(ConstAddress header_address) {
size_t cell_index, object_bit;
ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
@@ -129,7 +123,7 @@ void ObjectStartBitmap::SetBit(ConstAddress header_address) {
static_cast<uint8_t>(load(cell_index) | (1 << object_bit)));
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
void ObjectStartBitmap::ClearBit(ConstAddress header_address) {
size_t cell_index, object_bit;
ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
@@ -137,16 +131,16 @@ void ObjectStartBitmap::ClearBit(ConstAddress header_address) {
static_cast<uint8_t>(load(cell_index) & ~(1 << object_bit)));
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
bool ObjectStartBitmap::CheckBit(ConstAddress header_address) const {
size_t cell_index, object_bit;
ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
return load<mode>(cell_index) & (1 << object_bit);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
void ObjectStartBitmap::store(size_t cell_index, uint8_t value) {
- if (mode == HeapObjectHeader::AccessMode::kNonAtomic) {
+ if (mode == AccessMode::kNonAtomic) {
object_start_bit_map_[cell_index] = value;
return;
}
@@ -154,9 +148,9 @@ void ObjectStartBitmap::store(size_t cell_index, uint8_t value) {
->store(value, std::memory_order_release);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
uint8_t ObjectStartBitmap::load(size_t cell_index) const {
- if (mode == HeapObjectHeader::AccessMode::kNonAtomic) {
+ if (mode == AccessMode::kNonAtomic) {
return object_start_bit_map_[cell_index];
}
return v8::base::AsAtomicPtr(&object_start_bit_map_[cell_index])
@@ -204,15 +198,13 @@ class V8_EXPORT_PRIVATE PlatformAwareObjectStartBitmap
public:
explicit inline PlatformAwareObjectStartBitmap(Address offset);
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline void SetBit(ConstAddress);
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline void ClearBit(ConstAddress);
private:
- template <HeapObjectHeader::AccessMode>
+ template <AccessMode>
static bool ShouldForceNonAtomic();
};
@@ -220,11 +212,11 @@ PlatformAwareObjectStartBitmap::PlatformAwareObjectStartBitmap(Address offset)
: ObjectStartBitmap(offset) {}
// static
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
#if defined(V8_TARGET_ARCH_ARM)
// Use non-atomic accesses on ARMv7 when marking is not active.
- if (mode == HeapObjectHeader::AccessMode::kAtomic) {
+ if (mode == AccessMode::kAtomic) {
if (V8_LIKELY(!ProcessHeap::IsAnyIncrementalOrConcurrentMarking()))
return true;
}
@@ -232,21 +224,19 @@ bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
return false;
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
void PlatformAwareObjectStartBitmap::SetBit(ConstAddress header_address) {
if (ShouldForceNonAtomic<mode>()) {
- ObjectStartBitmap::SetBit<HeapObjectHeader::AccessMode::kNonAtomic>(
- header_address);
+ ObjectStartBitmap::SetBit<AccessMode::kNonAtomic>(header_address);
return;
}
ObjectStartBitmap::SetBit<mode>(header_address);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
void PlatformAwareObjectStartBitmap::ClearBit(ConstAddress header_address) {
if (ShouldForceNonAtomic<mode>()) {
- ObjectStartBitmap::ClearBit<HeapObjectHeader::AccessMode::kNonAtomic>(
- header_address);
+ ObjectStartBitmap::ClearBit<AccessMode::kNonAtomic>(header_address);
return;
}
ObjectStartBitmap::ClearBit<mode>(header_address);
diff --git a/deps/v8/src/heap/cppgc/persistent-node.cc b/deps/v8/src/heap/cppgc/persistent-node.cc
index 9c5113f86a..b9585f4be7 100644
--- a/deps/v8/src/heap/cppgc/persistent-node.cc
+++ b/deps/v8/src/heap/cppgc/persistent-node.cc
@@ -8,6 +8,7 @@
#include <numeric>
#include "include/cppgc/persistent.h"
+#include "src/heap/cppgc/process-heap.h"
namespace cppgc {
namespace internal {
@@ -68,5 +69,13 @@ void PersistentRegion::Trace(Visitor* visitor) {
nodes_.end());
}
+PersistentRegionLock::PersistentRegionLock() {
+ g_process_mutex.Pointer()->Lock();
+}
+
+PersistentRegionLock::~PersistentRegionLock() {
+ g_process_mutex.Pointer()->Unlock();
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/pointer-policies.cc b/deps/v8/src/heap/cppgc/pointer-policies.cc
index 0c45a7ae54..4fc5abb279 100644
--- a/deps/v8/src/heap/cppgc/pointer-policies.cc
+++ b/deps/v8/src/heap/cppgc/pointer-policies.cc
@@ -31,5 +31,17 @@ PersistentRegion& WeakPersistentPolicy::GetPersistentRegion(void* object) {
return heap->GetWeakPersistentRegion();
}
+PersistentRegion& StrongCrossThreadPersistentPolicy::GetPersistentRegion(
+ void* object) {
+ auto* heap = BasePage::FromPayload(object)->heap();
+ return heap->GetStrongCrossThreadPersistentRegion();
+}
+
+PersistentRegion& WeakCrossThreadPersistentPolicy::GetPersistentRegion(
+ void* object) {
+ auto* heap = BasePage::FromPayload(object)->heap();
+ return heap->GetWeakCrossThreadPersistentRegion();
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/process-heap.cc b/deps/v8/src/heap/cppgc/process-heap.cc
index 1408988396..76a4a5dff5 100644
--- a/deps/v8/src/heap/cppgc/process-heap.cc
+++ b/deps/v8/src/heap/cppgc/process-heap.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/heap/cppgc/process-heap.h"
+
#include "include/cppgc/internal/process-heap.h"
namespace cppgc {
@@ -9,5 +11,7 @@ namespace internal {
AtomicEntryFlag ProcessHeap::concurrent_marking_flag_;
+v8::base::LazyMutex g_process_mutex = LAZY_MUTEX_INITIALIZER;
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/process-heap.h b/deps/v8/src/heap/cppgc/process-heap.h
new file mode 100644
index 0000000000..8afc7c88eb
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/process-heap.h
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_PROCESS_HEAP_H_
+#define V8_HEAP_CPPGC_PROCESS_HEAP_H_
+
+#include "src/base/platform/mutex.h"
+
+namespace cppgc {
+namespace internal {
+
+extern v8::base::LazyMutex g_process_mutex;
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_PROCESS_HEAP_H_
diff --git a/deps/v8/src/heap/cppgc/raw-heap.cc b/deps/v8/src/heap/cppgc/raw-heap.cc
index 19200ae8a2..f45039c870 100644
--- a/deps/v8/src/heap/cppgc/raw-heap.cc
+++ b/deps/v8/src/heap/cppgc/raw-heap.cc
@@ -12,17 +12,20 @@ namespace internal {
// static
constexpr size_t RawHeap::kNumberOfRegularSpaces;
-RawHeap::RawHeap(HeapBase* heap, size_t custom_spaces) : main_heap_(heap) {
+RawHeap::RawHeap(
+ HeapBase* heap,
+ const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces)
+ : main_heap_(heap) {
size_t i = 0;
for (; i < static_cast<size_t>(RegularSpaceType::kLarge); ++i) {
- spaces_.push_back(std::make_unique<NormalPageSpace>(this, i));
+ spaces_.push_back(std::make_unique<NormalPageSpace>(this, i, false));
}
spaces_.push_back(std::make_unique<LargePageSpace>(
this, static_cast<size_t>(RegularSpaceType::kLarge)));
DCHECK_EQ(kNumberOfRegularSpaces, spaces_.size());
- for (size_t j = 0; j < custom_spaces; j++) {
- spaces_.push_back(
- std::make_unique<NormalPageSpace>(this, kNumberOfRegularSpaces + j));
+ for (size_t j = 0; j < custom_spaces.size(); j++) {
+ spaces_.push_back(std::make_unique<NormalPageSpace>(
+ this, kNumberOfRegularSpaces + j, custom_spaces[j]->IsCompactable()));
}
}
diff --git a/deps/v8/src/heap/cppgc/raw-heap.h b/deps/v8/src/heap/cppgc/raw-heap.h
index 79a278546b..fceaeae594 100644
--- a/deps/v8/src/heap/cppgc/raw-heap.h
+++ b/deps/v8/src/heap/cppgc/raw-heap.h
@@ -47,7 +47,8 @@ class V8_EXPORT_PRIVATE RawHeap final {
using iterator = Spaces::iterator;
using const_iterator = Spaces::const_iterator;
- explicit RawHeap(HeapBase* heap, size_t custom_spaces);
+ RawHeap(HeapBase* heap,
+ const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces);
RawHeap(const RawHeap&) = delete;
RawHeap& operator=(const RawHeap&) = delete;
diff --git a/deps/v8/src/heap/cppgc/sanitizers.h b/deps/v8/src/heap/cppgc/sanitizers.h
index 17f6cd7306..c3a8ff684d 100644
--- a/deps/v8/src/heap/cppgc/sanitizers.h
+++ b/deps/v8/src/heap/cppgc/sanitizers.h
@@ -47,27 +47,32 @@
// API for newly allocated or reclaimed memory.
#if defined(V8_USE_MEMORY_SANITIZER)
-#define SET_MEMORY_ACCESIBLE(address, size) \
- MSAN_UNPOISON(address, size); \
- memset((address), 0, (size))
-#define SET_MEMORY_INACCESIBLE(address, size) MSAN_POISON((address), (size))
-#elif DEBUG || defined(V8_USE_ADDRESS_SANITIZER)
-#define SET_MEMORY_ACCESIBLE(address, size) \
- ASAN_UNPOISON_MEMORY_REGION(address, size); \
- memset((address), 0, (size))
-#define SET_MEMORY_INACCESIBLE(address, size) \
- ::cppgc::internal::ZapMemory((address), (size)); \
+#define SET_MEMORY_ACCESSIBLE(address, size) MSAN_UNPOISON(address, size);
+#define SET_MEMORY_INACCESSIBLE(address, size) \
+ memset((address), 0, (size)); \
+ MSAN_POISON((address), (size))
+#elif defined(V8_USE_ADDRESS_SANITIZER)
+#define SET_MEMORY_ACCESSIBLE(address, size) \
+ ASAN_UNPOISON_MEMORY_REGION(address, size);
+#define SET_MEMORY_INACCESSIBLE(address, size) \
+ memset((address), 0, (size)); \
ASAN_POISON_MEMORY_REGION(address, size)
+#elif DEBUG
+#define SET_MEMORY_ACCESSIBLE(address, size) memset((address), 0, (size))
+#define SET_MEMORY_INACCESSIBLE(address, size) \
+ ::cppgc::internal::ZapMemory((address), (size));
#else
-#define SET_MEMORY_ACCESIBLE(address, size) memset((address), 0, (size))
-#define SET_MEMORY_INACCESIBLE(address, size) ((void)(address), (void)(size))
+#define SET_MEMORY_ACCESSIBLE(address, size) ((void)(address), (void)(size))
+#define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size))
#endif
namespace cppgc {
namespace internal {
inline void ZapMemory(void* address, size_t size) {
- static constexpr uint8_t kZappedValue = 0xcd;
+ // The lowest bit of the zapped value should be 0 so that zapped objects
+ // are never viewed as fully constructed objects.
+ static constexpr uint8_t kZappedValue = 0xdc;
memset(address, kZappedValue, size);
}
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index 986ea6f4fa..5b28d3659c 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -114,7 +114,7 @@ using SpaceStates = std::vector<SpaceState>;
void StickyUnmark(HeapObjectHeader* header) {
// Young generation in Oilpan uses sticky mark bits.
#if !defined(CPPGC_YOUNG_GENERATION)
- header->Unmark<HeapObjectHeader::AccessMode::kAtomic>();
+ header->Unmark<AccessMode::kAtomic>();
#endif
}
@@ -127,7 +127,7 @@ class InlinedFinalizationBuilder final {
void AddFinalizer(HeapObjectHeader* header, size_t size) {
header->Finalize();
- SET_MEMORY_INACCESIBLE(header, size);
+ SET_MEMORY_INACCESSIBLE(header, size);
}
void AddFreeListEntry(Address start, size_t size) {
@@ -153,7 +153,7 @@ class DeferredFinalizationBuilder final {
result_.unfinalized_objects.push_back({header});
found_finalizer_ = true;
} else {
- SET_MEMORY_INACCESIBLE(header, size);
+ SET_MEMORY_INACCESSIBLE(header, size);
}
}
@@ -178,7 +178,7 @@ class DeferredFinalizationBuilder final {
template <typename FinalizationBuilder>
typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
- constexpr auto kAtomicAccess = HeapObjectHeader::AccessMode::kAtomic;
+ constexpr auto kAtomicAccess = AccessMode::kAtomic;
FinalizationBuilder builder(page);
PlatformAwareObjectStartBitmap& bitmap = page->object_start_bitmap();
@@ -191,7 +191,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
const size_t size = header->GetSize();
// Check if this is a free list entry.
if (header->IsFree<kAtomicAccess>()) {
- SET_MEMORY_INACCESIBLE(header, std::min(kFreeListEntrySize, size));
+ SET_MEMORY_INACCESSIBLE(header, std::min(kFreeListEntrySize, size));
begin += size;
continue;
}
@@ -273,7 +273,9 @@ class SweepFinalizer final {
// Call finalizers.
for (HeapObjectHeader* object : page_state->unfinalized_objects) {
+ const size_t size = object->GetSize();
object->Finalize();
+ SET_MEMORY_INACCESSIBLE(object, size);
}
// Unmap page if empty.
@@ -444,10 +446,19 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
// - moves all Heap pages to local Sweeper's state (SpaceStates).
class PrepareForSweepVisitor final
: public HeapVisitor<PrepareForSweepVisitor> {
+ using CompactableSpaceHandling =
+ Sweeper::SweepingConfig::CompactableSpaceHandling;
+
public:
- explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
+ PrepareForSweepVisitor(SpaceStates* states,
+ CompactableSpaceHandling compactable_space_handling)
+ : states_(states),
+ compactable_space_handling_(compactable_space_handling) {}
bool VisitNormalPageSpace(NormalPageSpace* space) {
+ if ((compactable_space_handling_ == CompactableSpaceHandling::kIgnore) &&
+ space->is_compactable())
+ return true;
DCHECK(!space->linear_allocation_buffer().size());
space->free_list().Clear();
ExtractPages(space);
@@ -467,6 +478,7 @@ class PrepareForSweepVisitor final
}
SpaceStates* states_;
+ CompactableSpaceHandling compactable_space_handling_;
};
} // namespace
@@ -483,17 +495,20 @@ class Sweeper::SweeperImpl final {
~SweeperImpl() { CancelSweepers(); }
- void Start(Config config) {
+ void Start(SweepingConfig config) {
is_in_progress_ = true;
#if DEBUG
+ // Verify bitmap for all spaces regardless of |compactable_space_handling|.
ObjectStartBitmapVerifier().Verify(heap_);
#endif
- PrepareForSweepVisitor(&space_states_).Traverse(heap_);
+ PrepareForSweepVisitor(&space_states_, config.compactable_space_handling)
+ .Traverse(heap_);
- if (config == Config::kAtomic) {
+ if (config.sweeping_type == SweepingConfig::SweepingType::kAtomic) {
Finish();
} else {
- DCHECK_EQ(Config::kIncrementalAndConcurrent, config);
+ DCHECK_EQ(SweepingConfig::SweepingType::kIncrementalAndConcurrent,
+ config.sweeping_type);
ScheduleIncrementalSweeping();
ScheduleConcurrentSweeping();
}
@@ -502,6 +517,11 @@ class Sweeper::SweeperImpl final {
void FinishIfRunning() {
if (!is_in_progress_) return;
+ if (concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid() &&
+ concurrent_sweeper_handle_->UpdatePriorityEnabled()) {
+ concurrent_sweeper_handle_->UpdatePriority(
+ cppgc::TaskPriority::kUserBlocking);
+ }
Finish();
}
@@ -524,6 +544,10 @@ class Sweeper::SweeperImpl final {
stats_collector_->NotifySweepingCompleted();
}
+ void WaitForConcurrentSweepingForTesting() {
+ if (concurrent_sweeper_handle_) concurrent_sweeper_handle_->Join();
+ }
+
private:
class IncrementalSweepTask : public cppgc::IdleTask {
public:
@@ -563,14 +587,17 @@ class Sweeper::SweeperImpl final {
};
void ScheduleIncrementalSweeping() {
- if (!platform_ || !foreground_task_runner_) return;
+ DCHECK(platform_);
+ if (!foreground_task_runner_ ||
+ !foreground_task_runner_->IdleTasksEnabled())
+ return;
incremental_sweeper_handle_ =
IncrementalSweepTask::Post(this, foreground_task_runner_.get());
}
void ScheduleConcurrentSweeping() {
- if (!platform_) return;
+ DCHECK(platform_);
concurrent_sweeper_handle_ = platform_->PostJob(
cppgc::TaskPriority::kUserVisible,
@@ -579,7 +606,8 @@ class Sweeper::SweeperImpl final {
void CancelSweepers() {
if (incremental_sweeper_handle_) incremental_sweeper_handle_.Cancel();
- if (concurrent_sweeper_handle_) concurrent_sweeper_handle_->Cancel();
+ if (concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid())
+ concurrent_sweeper_handle_->Cancel();
}
void SynchronizeAndFinalizeConcurrentSweeping() {
@@ -605,8 +633,11 @@ Sweeper::Sweeper(RawHeap* heap, cppgc::Platform* platform,
Sweeper::~Sweeper() = default;
-void Sweeper::Start(Config config) { impl_->Start(config); }
+void Sweeper::Start(SweepingConfig config) { impl_->Start(config); }
void Sweeper::FinishIfRunning() { impl_->FinishIfRunning(); }
+void Sweeper::WaitForConcurrentSweepingForTesting() {
+ impl_->WaitForConcurrentSweepingForTesting();
+}
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/sweeper.h b/deps/v8/src/heap/cppgc/sweeper.h
index e94036521e..cb5824cc8c 100644
--- a/deps/v8/src/heap/cppgc/sweeper.h
+++ b/deps/v8/src/heap/cppgc/sweeper.h
@@ -17,10 +17,18 @@ namespace internal {
class StatsCollector;
class RawHeap;
+class ConcurrentSweeperTest;
class V8_EXPORT_PRIVATE Sweeper final {
public:
- enum class Config { kAtomic, kIncrementalAndConcurrent };
+ struct SweepingConfig {
+ enum class SweepingType : uint8_t { kAtomic, kIncrementalAndConcurrent };
+ enum class CompactableSpaceHandling { kSweep, kIgnore };
+
+ SweepingType sweeping_type = SweepingType::kIncrementalAndConcurrent;
+ CompactableSpaceHandling compactable_space_handling =
+ CompactableSpaceHandling::kSweep;
+ };
Sweeper(RawHeap*, cppgc::Platform*, StatsCollector*);
~Sweeper();
@@ -29,12 +37,16 @@ class V8_EXPORT_PRIVATE Sweeper final {
Sweeper& operator=(const Sweeper&) = delete;
// Sweeper::Start assumes the heap holds no linear allocation buffers.
- void Start(Config);
+ void Start(SweepingConfig);
void FinishIfRunning();
private:
+ void WaitForConcurrentSweepingForTesting();
+
class SweeperImpl;
std::unique_ptr<SweeperImpl> impl_;
+
+ friend class ConcurrentSweeperTest;
};
} // namespace internal
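
Sweeper::Start now takes a SweepingConfig rather than a bare enum, so callers can combine the sweeping type with compactable-space handling and rely on defaults for everything else. A sketch of how such a config is typically populated; the struct is restated here for illustration and only the fields visible in the header above are assumed.

#include <cstdint>

struct SweepingConfig {
  enum class SweepingType : std::uint8_t { kAtomic, kIncrementalAndConcurrent };
  enum class CompactableSpaceHandling { kSweep, kIgnore };

  SweepingType sweeping_type = SweepingType::kIncrementalAndConcurrent;
  CompactableSpaceHandling compactable_space_handling =
      CompactableSpaceHandling::kSweep;
};

// Hypothetical call sites: the defaults cover the common concurrent case, and
// a compaction-aware caller overrides only the fields it cares about.
SweepingConfig DefaultSweepingConfig() { return {}; }

SweepingConfig AtomicSweepIgnoringCompactableSpaces() {
  SweepingConfig config;
  config.sweeping_type = SweepingConfig::SweepingType::kAtomic;
  config.compactable_space_handling =
      SweepingConfig::CompactableSpaceHandling::kIgnore;
  return config;
}
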
diff --git a/deps/v8/src/heap/cppgc/trace-trait.cc b/deps/v8/src/heap/cppgc/trace-trait.cc
index c0e9b342db..9f410b9c12 100644
--- a/deps/v8/src/heap/cppgc/trace-trait.cc
+++ b/deps/v8/src/heap/cppgc/trace-trait.cc
@@ -16,12 +16,10 @@ TraceDescriptor TraceTraitFromInnerAddressImpl::GetTraceDescriptor(
// mixins.
const HeapObjectHeader& header =
BasePage::FromPayload(address)
- ->ObjectHeaderFromInnerAddress<HeapObjectHeader::AccessMode::kAtomic>(
- address);
- return {header.Payload(),
- GlobalGCInfoTable::GCInfoFromIndex(
- header.GetGCInfoIndex<HeapObjectHeader::AccessMode::kAtomic>())
- .trace};
+ ->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(address);
+ return {header.Payload(), GlobalGCInfoTable::GCInfoFromIndex(
+ header.GetGCInfoIndex<AccessMode::kAtomic>())
+ .trace};
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/visitor.cc b/deps/v8/src/heap/cppgc/visitor.cc
index 61eedf3bd9..33786f6fce 100644
--- a/deps/v8/src/heap/cppgc/visitor.cc
+++ b/deps/v8/src/heap/cppgc/visitor.cc
@@ -68,15 +68,20 @@ void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
HeapObjectHeader& header) {
- if (!header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>()) {
- visitor_.Visit(
- header.Payload(),
- {header.Payload(),
- GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
+ if (!header.IsInConstruction<AccessMode::kNonAtomic>()) {
+ VisitFullyConstructedConservatively(header);
} else {
- VisitConservatively(header, TraceConservatively);
+ VisitInConstructionConservatively(header, TraceConservatively);
}
}
+void ConservativeTracingVisitor::VisitFullyConstructedConservatively(
+ HeapObjectHeader& header) {
+ visitor_.Visit(
+ header.Payload(),
+ {header.Payload(),
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/visitor.h b/deps/v8/src/heap/cppgc/visitor.h
index c8395ffa98..3b0f185ccb 100644
--- a/deps/v8/src/heap/cppgc/visitor.h
+++ b/deps/v8/src/heap/cppgc/visitor.h
@@ -31,14 +31,8 @@ class VisitorBase : public cppgc::Visitor {
VisitorBase(const VisitorBase&) = delete;
VisitorBase& operator=(const VisitorBase&) = delete;
- template <typename T>
- void TraceRootForTesting(const Persistent<T>& p, const SourceLocation& loc) {
- TraceRoot(p, loc);
- }
-
- template <typename T>
- void TraceRootForTesting(const WeakPersistent<T>& p,
- const SourceLocation& loc) {
+ template <typename Persistent>
+ void TraceRootForTesting(const Persistent& p, const SourceLocation& loc) {
TraceRoot(p, loc);
}
};
@@ -59,8 +53,10 @@ class ConservativeTracingVisitor {
protected:
using TraceConservativelyCallback = void(ConservativeTracingVisitor*,
const HeapObjectHeader&);
- virtual void VisitConservatively(HeapObjectHeader&,
- TraceConservativelyCallback) {}
+ virtual void V8_EXPORT_PRIVATE
+ VisitFullyConstructedConservatively(HeapObjectHeader&);
+ virtual void VisitInConstructionConservatively(HeapObjectHeader&,
+ TraceConservativelyCallback) {}
HeapBase& heap_;
PageBackend& page_backend_;
diff --git a/deps/v8/src/heap/cppgc/write-barrier.cc b/deps/v8/src/heap/cppgc/write-barrier.cc
index 4a076e8653..795bed8439 100644
--- a/deps/v8/src/heap/cppgc/write-barrier.cc
+++ b/deps/v8/src/heap/cppgc/write-barrier.cc
@@ -34,9 +34,12 @@ void MarkValue(const BasePage* page, MarkerBase* marker, const void* value) {
DCHECK(marker);
- if (V8_UNLIKELY(
- header
- .IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>())) {
+ if (V8_UNLIKELY(header.IsInConstruction<AccessMode::kNonAtomic>())) {
+  // In-construction objects are traced only if they are unmarked. If marking
+  // reaches this object again once it is fully constructed, it will re-mark
+  // it, and the code that traces it as a previously not-fully-constructed
+  // object will know to bail out.
+ header.Unmark<AccessMode::kAtomic>();
marker->WriteBarrierForInConstructionObject(header);
return;
}
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index 9c926bed69..01c0402f7e 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -86,19 +86,33 @@ LocalEmbedderHeapTracer::ProcessingScope::~ProcessingScope() {
}
}
+// static
+LocalEmbedderHeapTracer::WrapperInfo
+LocalEmbedderHeapTracer::ExtractWrapperInfo(Isolate* isolate,
+ JSObject js_object) {
+ DCHECK_GE(js_object.GetEmbedderFieldCount(), 2);
+ DCHECK(js_object.IsApiWrapper());
+
+ WrapperInfo info;
+ if (EmbedderDataSlot(js_object, 0)
+ .ToAlignedPointerSafe(isolate, &info.first) &&
+ info.first &&
+ EmbedderDataSlot(js_object, 1)
+ .ToAlignedPointerSafe(isolate, &info.second)) {
+ return info;
+ }
+ return {nullptr, nullptr};
+}
+
void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper(
JSObject js_object) {
DCHECK(js_object.IsApiWrapper());
if (js_object.GetEmbedderFieldCount() < 2) return;
- void* pointer0;
- void* pointer1;
- if (EmbedderDataSlot(js_object, 0)
- .ToAlignedPointer(tracer_->isolate_, &pointer0) &&
- pointer0 &&
- EmbedderDataSlot(js_object, 1)
- .ToAlignedPointer(tracer_->isolate_, &pointer1)) {
- wrapper_cache_.push_back({pointer0, pointer1});
+ WrapperInfo info =
+ LocalEmbedderHeapTracer::ExtractWrapperInfo(tracer_->isolate_, js_object);
+ if (VerboseWrapperInfo(info).is_valid()) {
+ wrapper_cache_.push_back(std::move(info));
}
FlushWrapperCacheIfFull();
}
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 728ede4452..5aff187ed3 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -20,6 +20,23 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
using WrapperInfo = std::pair<void*, void*>;
using WrapperCache = std::vector<WrapperInfo>;
+  // WrapperInfo is passed over the API. Use VerboseWrapperInfo to access the
+  // pair's internals in a named way. See ProcessingScope::TracePossibleWrapper()
+  // below for how a V8 object is parsed to gather the information.
+ struct VerboseWrapperInfo {
+ explicit VerboseWrapperInfo(const WrapperInfo& raw_info)
+ : raw_info(raw_info) {}
+
+ // Information describing the type pointed to via instance().
+ void* type_info() const { return raw_info.first; }
+ // Direct pointer to an instance described by type_info().
+ void* instance() const { return raw_info.second; }
+
+ bool is_valid() const { return type_info(); }
+
+ const WrapperInfo& raw_info;
+ };
+
class V8_EXPORT_PRIVATE ProcessingScope {
public:
explicit ProcessingScope(LocalEmbedderHeapTracer* tracer);
@@ -38,6 +55,8 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
WrapperCache wrapper_cache_;
};
+ static WrapperInfo ExtractWrapperInfo(Isolate* isolate, JSObject js_object);
+
explicit LocalEmbedderHeapTracer(Isolate* isolate) : isolate_(isolate) {}
~LocalEmbedderHeapTracer() {
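To make the wrapper-extraction refactoring above concrete, here is a hedged sketch of reading a WrapperInfo through the new named accessors; `isolate` and `js_object` are assumed to be in scope:

    LocalEmbedderHeapTracer::WrapperInfo info =
        LocalEmbedderHeapTracer::ExtractWrapperInfo(isolate, js_object);
    LocalEmbedderHeapTracer::VerboseWrapperInfo verbose(info);
    if (verbose.is_valid()) {                 // Valid iff type_info() is set.
      void* type_info = verbose.type_info();  // raw_info.first
      void* instance = verbose.instance();    // raw_info.second
      // ... e.g. hand the pair to the embedder's tracing callback.
    }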
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index 51a856809a..a87611e068 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -21,6 +21,7 @@
#include "src/objects/shared-function-info-inl.h"
#include "src/objects/source-text-module.h"
#include "src/objects/string-inl.h"
+#include "src/objects/string.h"
#include "src/objects/template-objects-inl.h"
namespace v8 {
@@ -195,8 +196,8 @@ Handle<BytecodeArray> FactoryBase<Impl>::NewBytecodeArray(
instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
instance->set_constant_pool(*constant_pool);
instance->set_handler_table(read_only_roots().empty_byte_array());
- instance->set_synchronized_source_position_table(
- read_only_roots().undefined_value());
+ instance->set_source_position_table(read_only_roots().undefined_value(),
+ kReleaseStore);
CopyBytes(reinterpret_cast<byte*>(instance->GetFirstBytecodeAddress()),
raw_bytecodes, length);
instance->clear_padding();
@@ -312,9 +313,9 @@ Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfo(
bool has_shared_name = maybe_name.ToHandle(&shared_name);
if (has_shared_name) {
DCHECK(shared_name->IsFlat());
- shared->set_name_or_scope_info(*shared_name);
+ shared->set_name_or_scope_info(*shared_name, kReleaseStore);
} else {
- DCHECK_EQ(shared->name_or_scope_info(),
+ DCHECK_EQ(shared->name_or_scope_info(kAcquireLoad),
SharedFunctionInfo::kNoSharedNameSentinel);
}
@@ -325,11 +326,12 @@ Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfo(
DCHECK(!Builtins::IsBuiltinId(maybe_builtin_index));
DCHECK_IMPLIES(function_data->IsCode(),
!Code::cast(*function_data).is_builtin());
- shared->set_function_data(*function_data);
+ shared->set_function_data(*function_data, kReleaseStore);
} else if (Builtins::IsBuiltinId(maybe_builtin_index)) {
shared->set_builtin_id(maybe_builtin_index);
} else {
- shared->set_builtin_id(Builtins::kIllegal);
+ DCHECK(shared->HasBuiltinId());
+ DCHECK_EQ(Builtins::kIllegal, shared->builtin_id());
}
shared->CalculateConstructAsBuiltin();
@@ -409,14 +411,14 @@ FactoryBase<Impl>::NewTemplateObjectDescription(
template <typename Impl>
Handle<FeedbackMetadata> FactoryBase<Impl>::NewFeedbackMetadata(
- int slot_count, int feedback_cell_count, AllocationType allocation) {
+ int slot_count, int create_closure_slot_count, AllocationType allocation) {
DCHECK_LE(0, slot_count);
int size = FeedbackMetadata::SizeFor(slot_count);
HeapObject result = AllocateRawWithImmortalMap(
size, allocation, read_only_roots().feedback_metadata_map());
Handle<FeedbackMetadata> data(FeedbackMetadata::cast(result), isolate());
data->set_slot_count(slot_count);
- data->set_closure_feedback_cell_count(feedback_cell_count);
+ data->set_create_closure_slot_count(create_closure_slot_count);
// Initialize the data section to 0.
int data_size = size - FeedbackMetadata::kHeaderSize;
@@ -435,7 +437,7 @@ Handle<CoverageInfo> FactoryBase<Impl>::NewCoverageInfo(
int size = CoverageInfo::SizeFor(slot_count);
Map map = read_only_roots().coverage_info_map();
HeapObject result =
- AllocateRawWithImmortalMap(size, AllocationType::kYoung, map);
+ AllocateRawWithImmortalMap(size, AllocationType::kOld, map);
Handle<CoverageInfo> info(CoverageInfo::cast(result), isolate());
info->set_slot_count(slot_count);
@@ -507,7 +509,8 @@ Handle<SeqOneByteString> FactoryBase<Impl>::NewOneByteInternalizedString(
Handle<SeqOneByteString> result =
AllocateRawOneByteInternalizedString(str.length(), hash_field);
DisallowHeapAllocation no_gc;
- MemCopy(result->GetChars(no_gc), str.begin(), str.length());
+ MemCopy(result->GetChars(no_gc, SharedStringAccessGuardIfNeeded::NotNeeded()),
+ str.begin(), str.length());
return result;
}
@@ -517,7 +520,8 @@ Handle<SeqTwoByteString> FactoryBase<Impl>::NewTwoByteInternalizedString(
Handle<SeqTwoByteString> result =
AllocateRawTwoByteInternalizedString(str.length(), hash_field);
DisallowHeapAllocation no_gc;
- MemCopy(result->GetChars(no_gc), str.begin(), str.length() * kUC16Size);
+ MemCopy(result->GetChars(no_gc, SharedStringAccessGuardIfNeeded::NotNeeded()),
+ str.begin(), str.length() * kUC16Size);
return result;
}
@@ -605,21 +609,31 @@ MaybeHandle<String> FactoryBase<Impl>::NewConsString(
Handle<SeqOneByteString> result =
NewRawOneByteString(length, allocation).ToHandleChecked();
DisallowHeapAllocation no_gc;
- uint8_t* dest = result->GetChars(no_gc);
+ uint8_t* dest =
+ result->GetChars(no_gc, SharedStringAccessGuardIfNeeded::NotNeeded());
// Copy left part.
- const uint8_t* src = left->template GetChars<uint8_t>(no_gc);
- CopyChars(dest, src, left_length);
+ {
+ SharedStringAccessGuardIfNeeded access_guard(*left);
+ const uint8_t* src =
+ left->template GetChars<uint8_t>(no_gc, access_guard);
+ CopyChars(dest, src, left_length);
+ }
// Copy right part.
- src = right->template GetChars<uint8_t>(no_gc);
- CopyChars(dest + left_length, src, right_length);
+ {
+ SharedStringAccessGuardIfNeeded access_guard(*right);
+ const uint8_t* src =
+ right->template GetChars<uint8_t>(no_gc, access_guard);
+ CopyChars(dest + left_length, src, right_length);
+ }
return result;
}
Handle<SeqTwoByteString> result =
NewRawTwoByteString(length, allocation).ToHandleChecked();
- DisallowHeapAllocation pointer_stays_valid;
- uc16* sink = result->GetChars(pointer_stays_valid);
+ DisallowHeapAllocation no_gc;
+ uc16* sink =
+ result->GetChars(no_gc, SharedStringAccessGuardIfNeeded::NotNeeded());
String::WriteToFlat(*left, sink, 0, left->length());
String::WriteToFlat(*right, sink + left->length(), 0, right->length());
return result;
@@ -777,7 +791,9 @@ template <typename Impl>
HeapObject FactoryBase<Impl>::AllocateRawArray(int size,
AllocationType allocation) {
HeapObject result = AllocateRaw(size, allocation);
- if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
+ if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
+ (size > Heap::MaxRegularHeapObjectSize(allocation)) &&
+ FLAG_use_marking_progress_bar) {
BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
}
diff --git a/deps/v8/src/heap/factory-base.h b/deps/v8/src/heap/factory-base.h
index dc95e16a6a..847802629e 100644
--- a/deps/v8/src/heap/factory-base.h
+++ b/deps/v8/src/heap/factory-base.h
@@ -137,7 +137,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase {
// Allocates a FeedbackMedata object and zeroes the data section.
Handle<FeedbackMetadata> NewFeedbackMetadata(
- int slot_count, int feedback_cell_count,
+ int slot_count, int create_closure_slot_count,
AllocationType allocation = AllocationType::kOld);
Handle<CoverageInfo> NewCoverageInfo(const ZoneVector<SourceRange>& slots);
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 7e66123681..3c2f2d167e 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -59,6 +59,7 @@
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/string-set-inl.h"
#include "src/objects/struct-inl.h"
+#include "src/objects/synthetic-module-inl.h"
#include "src/objects/template-objects-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/roots/roots.h"
@@ -67,25 +68,6 @@
namespace v8 {
namespace internal {
-namespace {
-
-int ComputeCodeObjectSize(const CodeDesc& desc) {
- bool has_unwinding_info = desc.unwinding_info != nullptr;
- DCHECK((has_unwinding_info && desc.unwinding_info_size > 0) ||
- (!has_unwinding_info && desc.unwinding_info_size == 0));
- int body_size = desc.instr_size;
- int unwinding_info_size_field_size = kInt64Size;
- if (has_unwinding_info) {
- body_size = RoundUp(body_size, kInt64Size) + desc.unwinding_info_size +
- unwinding_info_size_field_size;
- }
- int object_size = Code::SizeFor(RoundUp(body_size, kObjectAlignment));
- DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
- return object_size;
-}
-
-} // namespace
-
Factory::CodeBuilder::CodeBuilder(Isolate* isolate, const CodeDesc& desc,
CodeKind kind)
: isolate_(isolate),
@@ -138,32 +120,27 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
isolate_->heap()->SetBasicBlockProfilingData(new_list);
}
+ STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
+ const int object_size = Code::SizeFor(code_desc_.body_size());
+
Handle<Code> code;
{
- int object_size = ComputeCodeObjectSize(code_desc_);
Heap* heap = isolate_->heap();
CodePageCollectionMemoryModificationScope code_allocation(heap);
HeapObject result;
AllocationType allocation_type =
is_executable_ ? AllocationType::kCode : AllocationType::kReadOnly;
- AllocationAlignment alignment = is_executable_
- ? AllocationAlignment::kCodeAligned
- : AllocationAlignment::kWordAligned;
if (retry_allocation_or_fail) {
result = heap->AllocateRawWith<Heap::kRetryOrFail>(
- object_size, allocation_type, AllocationOrigin::kRuntime, alignment);
+ object_size, allocation_type, AllocationOrigin::kRuntime);
} else {
result = heap->AllocateRawWith<Heap::kLightRetry>(
- object_size, allocation_type, AllocationOrigin::kRuntime, alignment);
+ object_size, allocation_type, AllocationOrigin::kRuntime);
// Return an empty handle if we cannot allocate the code object.
if (result.is_null()) return MaybeHandle<Code>();
}
- if (!is_movable_) {
- result = heap->EnsureImmovableCode(result, object_size);
- }
-
// The code object has not been fully initialized yet. We rely on the
// fact that no allocation will happen from this point on.
DisallowHeapAllocation no_gc;
@@ -179,21 +156,22 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
}
constexpr bool kIsNotOffHeapTrampoline = false;
- const bool has_unwinding_info = code_desc_.unwinding_info != nullptr;
- code->set_raw_instruction_size(code_desc_.instr_size);
+ code->set_raw_instruction_size(code_desc_.instruction_size());
+ code->set_raw_metadata_size(code_desc_.metadata_size());
code->set_relocation_info(*reloc_info);
- code->initialize_flags(kind_, has_unwinding_info, is_turbofanned_,
- stack_slots_, kIsNotOffHeapTrampoline);
+ code->initialize_flags(kind_, is_turbofanned_, stack_slots_,
+ kIsNotOffHeapTrampoline);
code->set_builtin_index(builtin_index_);
code->set_inlined_bytecode_size(inlined_bytecode_size_);
- code->set_code_data_container(*data_container);
+ code->set_code_data_container(*data_container, kReleaseStore);
code->set_deoptimization_data(*deoptimization_data_);
code->set_source_position_table(*source_position_table_);
- code->set_safepoint_table_offset(code_desc_.safepoint_table_offset);
- code->set_handler_table_offset(code_desc_.handler_table_offset);
- code->set_constant_pool_offset(code_desc_.constant_pool_offset);
- code->set_code_comments_offset(code_desc_.code_comments_offset);
+ code->set_handler_table_offset(code_desc_.handler_table_offset_relative());
+ code->set_constant_pool_offset(code_desc_.constant_pool_offset_relative());
+ code->set_code_comments_offset(code_desc_.code_comments_offset_relative());
+ code->set_unwinding_info_offset(
+ code_desc_.unwinding_info_offset_relative());
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
@@ -396,7 +374,8 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(
AllocationResult allocation = heap->AllocateRaw(size, allocation_type);
HeapObject result;
if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
- if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
+ if ((size > Heap::MaxRegularHeapObjectSize(allocation_type)) &&
+ FLAG_use_marking_progress_bar) {
BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
}
@@ -445,13 +424,12 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
*feedback_vector_map());
Handle<FeedbackVector> vector(FeedbackVector::cast(result), isolate());
vector->set_shared_function_info(*shared);
- vector->set_optimized_code_weak_or_smi(MaybeObject::FromSmi(Smi::FromEnum(
- FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
- : OptimizationMarker::kNone)));
+ vector->set_maybe_optimized_code(
+ HeapObjectReference::ClearedValue(isolate()));
vector->set_length(length);
vector->set_invocation_count(0);
vector->set_profiler_ticks(0);
- vector->clear_padding();
+ vector->InitializeOptimizationState();
vector->set_closure_feedback_cell_array(*closure_feedback_cell_array);
// TODO(leszeks): Initialize based on the feedback metadata.
@@ -473,6 +451,10 @@ Handle<EmbedderDataArray> Factory::NewEmbedderDataArray(int length) {
ObjectSlot end(array->slots_end());
size_t slot_count = end - start;
MemsetTagged(start, *undefined_value(), slot_count);
+ for (int i = 0; i < length; i++) {
+ // TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
+ EmbedderDataSlot(*array, i).AllocateExternalPointerEntry(isolate());
+ }
}
return array;
}
@@ -538,21 +520,27 @@ Handle<SmallOrderedNameDictionary> Factory::NewSmallOrderedNameDictionary(
}
Handle<OrderedHashSet> Factory::NewOrderedHashSet() {
- return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kMinCapacity)
+ return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kInitialCapacity,
+ AllocationType::kYoung)
.ToHandleChecked();
}
Handle<OrderedHashMap> Factory::NewOrderedHashMap() {
- return OrderedHashMap::Allocate(isolate(), OrderedHashMap::kMinCapacity)
+ return OrderedHashMap::Allocate(isolate(), OrderedHashMap::kInitialCapacity,
+ AllocationType::kYoung)
.ToHandleChecked();
}
-Handle<OrderedNameDictionary> Factory::NewOrderedNameDictionary() {
- return OrderedNameDictionary::Allocate(isolate(),
- OrderedNameDictionary::kMinCapacity)
+Handle<OrderedNameDictionary> Factory::NewOrderedNameDictionary(int capacity) {
+ return OrderedNameDictionary::Allocate(isolate(), capacity,
+ AllocationType::kYoung)
.ToHandleChecked();
}
+Handle<NameDictionary> Factory::NewNameDictionary(int at_least_space_for) {
+ return NameDictionary::New(isolate(), at_least_space_for);
+}
+
Handle<PropertyDescriptorObject> Factory::NewPropertyDescriptorObject() {
Handle<PropertyDescriptorObject> object =
Handle<PropertyDescriptorObject>::cast(
@@ -834,6 +822,7 @@ Handle<StringClass> Factory::InternalizeExternalString(Handle<String> string) {
Handle<Map> map = GetInternalizedStringMap(this, string).ToHandleChecked();
Handle<StringClass> external_string(
StringClass::cast(New(map, AllocationType::kOld)), isolate());
+ external_string->AllocateExternalPointerEntries(isolate());
external_string->set_length(cast_string->length());
external_string->set_hash_field(cast_string->hash_field());
external_string->SetResource(isolate(), nullptr);
@@ -959,6 +948,7 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
: uncached_external_one_byte_string_map();
Handle<ExternalOneByteString> external_string(
ExternalOneByteString::cast(New(map, AllocationType::kOld)), isolate());
+ external_string->AllocateExternalPointerEntries(isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
external_string->SetResource(isolate(), resource);
@@ -979,6 +969,7 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
: uncached_external_string_map();
Handle<ExternalTwoByteString> external_string(
ExternalTwoByteString::cast(New(map, AllocationType::kOld)), isolate());
+ external_string->AllocateExternalPointerEntries(isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
external_string->SetResource(isolate(), resource);
@@ -1063,6 +1054,7 @@ Handle<NativeContext> Factory::NewNativeContext() {
AllocationType::kOld));
context->set_native_context_map(*map);
map->set_native_context(*context);
+ context->AllocateExternalPointerEntries(isolate());
context->set_scope_info(ReadOnlyRoots(isolate()).native_scope_info());
context->set_previous(Context::unchecked_cast(Smi::zero()));
context->set_extension(*undefined_value());
@@ -1313,6 +1305,7 @@ Handle<Foreign> Factory::NewForeign(Address addr) {
HeapObject result = AllocateRawWithImmortalMap(map.instance_size(),
AllocationType::kYoung, map);
Handle<Foreign> foreign(Foreign::cast(result), isolate());
+ foreign->AllocateExternalPointerEntries(isolate());
foreign->set_foreign_address(isolate(), addr);
return foreign;
}
@@ -1324,6 +1317,7 @@ Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(Address type_address,
HeapObject result = AllocateRawWithImmortalMap(map.instance_size(),
AllocationType::kYoung, map);
Handle<WasmTypeInfo> info(WasmTypeInfo::cast(result), isolate());
+ info->AllocateExternalPointerEntries(isolate());
info->set_foreign_address(isolate(), type_address);
info->set_parent(*parent);
info->set_subtypes(*subtypes);
@@ -1463,7 +1457,8 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
map.SetInObjectUnusedPropertyFields(inobject_properties);
map.SetInstanceDescriptors(isolate(), *empty_descriptor_array(), 0);
if (FLAG_unbox_double_fields) {
- map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout(),
+ kReleaseStore);
}
// Must be called only after |instance_type|, |instance_size| and
// |layout_descriptor| are set.
@@ -1820,122 +1815,6 @@ DEFINE_ERROR(WasmLinkError, wasm_link_error)
DEFINE_ERROR(WasmRuntimeError, wasm_runtime_error)
#undef DEFINE_ERROR
-Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
- Handle<SharedFunctionInfo> info,
- Handle<Context> context,
- AllocationType allocation) {
- Handle<JSFunction> function(JSFunction::cast(New(map, allocation)),
- isolate());
-
- Handle<Code> code;
- bool have_cached_code = info->TryGetCachedCode(isolate()).ToHandle(&code);
-
- function->initialize_properties(isolate());
- function->initialize_elements();
- function->set_shared(*info);
- function->set_code(have_cached_code ? *code : info->GetCode());
- function->set_context(*context);
- function->set_raw_feedback_cell(*many_closures_cell());
- int header_size;
- if (map->has_prototype_slot()) {
- header_size = JSFunction::kSizeWithPrototype;
- function->set_prototype_or_initial_map(*the_hole_value());
- } else {
- header_size = JSFunction::kSizeWithoutPrototype;
- }
- InitializeJSObjectBody(function, map, header_size);
-
- if (have_cached_code) {
- IsCompiledScope is_compiled_scope(info->is_compiled_scope(isolate()));
- JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
- if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceHit(info, code);
- }
-
- return function;
-}
-
-Handle<JSFunction> Factory::NewFunctionForTest(Handle<String> name) {
- NewFunctionArgs args = NewFunctionArgs::ForFunctionWithoutCode(
- name, isolate()->sloppy_function_map(), LanguageMode::kSloppy);
- Handle<JSFunction> result = NewFunction(args);
- DCHECK(is_sloppy(result->shared().language_mode()));
- return result;
-}
-
-Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
- DCHECK(!args.name_.is_null());
-
- // Create the SharedFunctionInfo.
- Handle<NativeContext> context(isolate()->native_context());
- Handle<Map> map = args.GetMap(isolate());
- Handle<SharedFunctionInfo> info =
- NewSharedFunctionInfo(args.name_, args.maybe_wasm_function_data_,
- args.maybe_builtin_id_, kNormalFunction);
-
- // Proper language mode in shared function info will be set later.
- DCHECK(is_sloppy(info->language_mode()));
- DCHECK(!map->IsUndefined(isolate()));
-
-#ifdef DEBUG
- if (isolate()->bootstrapper()->IsActive()) {
- Handle<Code> code;
- DCHECK(
- // During bootstrapping some of these maps could be not created yet.
- (*map == context->get(Context::STRICT_FUNCTION_MAP_INDEX)) ||
- (*map ==
- context->get(Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)) ||
- (*map ==
- context->get(
- Context::STRICT_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX)) ||
- // Check if it's a creation of an empty or Proxy function during
- // bootstrapping.
- (args.maybe_builtin_id_ == Builtins::kEmptyFunction ||
- args.maybe_builtin_id_ == Builtins::kProxyConstructor));
- }
-#endif
-
- Handle<JSFunction> result = NewFunction(map, info, context);
-
- if (args.should_set_prototype_) {
- result->set_prototype_or_initial_map(
- *args.maybe_prototype_.ToHandleChecked());
- }
-
- if (args.should_set_language_mode_) {
- result->shared().set_language_mode(args.language_mode_);
- }
-
- if (args.should_create_and_set_initial_map_) {
- ElementsKind elements_kind;
- switch (args.type_) {
- case JS_ARRAY_TYPE:
- elements_kind = PACKED_SMI_ELEMENTS;
- break;
- case JS_ARGUMENTS_OBJECT_TYPE:
- elements_kind = PACKED_ELEMENTS;
- break;
- default:
- elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
- break;
- }
- Handle<Map> initial_map = NewMap(args.type_, args.instance_size_,
- elements_kind, args.inobject_properties_);
- result->shared().set_expected_nof_properties(args.inobject_properties_);
- // TODO(littledan): Why do we have this is_generator test when
- // NewFunctionPrototype already handles finding an appropriately
- // shared prototype?
- Handle<HeapObject> prototype = args.maybe_prototype_.ToHandleChecked();
- if (!IsResumableFunction(result->shared().kind())) {
- if (prototype->IsTheHole(isolate())) {
- prototype = NewFunctionPrototype(result);
- }
- }
- JSFunction::SetInitialMap(result, initial_map, prototype);
- }
-
- return result;
-}
-
Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
// Make sure to use globals from the function's context, since the function
// can be from a different context.
@@ -1970,71 +1849,6 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
return prototype;
}
-Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> info, Handle<Context> context,
- AllocationType allocation) {
- Handle<Map> initial_map(
- Map::cast(context->native_context().get(info->function_map_index())),
- isolate());
- return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
- allocation);
-}
-
-Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> info, Handle<Context> context,
- Handle<FeedbackCell> feedback_cell, AllocationType allocation) {
- Handle<Map> initial_map(
- Map::cast(context->native_context().get(info->function_map_index())),
- isolate());
- return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
- feedback_cell, allocation);
-}
-
-Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
- Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
- Handle<Context> context, AllocationType allocation) {
- DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
- Handle<JSFunction> result =
- NewFunction(initial_map, info, context, allocation);
-
- // Give compiler a chance to pre-initialize.
- Compiler::PostInstantiation(result);
-
- return result;
-}
-
-Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
- Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
- Handle<Context> context, Handle<FeedbackCell> feedback_cell,
- AllocationType allocation) {
- DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
- Handle<JSFunction> result =
- NewFunction(initial_map, info, context, allocation);
-
- // Bump the closure count that is encoded in the feedback cell's map.
- if (feedback_cell->map() == *no_closures_cell_map()) {
- feedback_cell->set_map(*one_closure_cell_map());
- } else if (feedback_cell->map() == *one_closure_cell_map()) {
- feedback_cell->set_map(*many_closures_cell_map());
- } else {
- DCHECK(feedback_cell->map() == *many_closures_cell_map());
- }
-
- // Check that the optimized code in the feedback cell wasn't marked for
- // deoptimization while not pointed to by any live JSFunction.
- if (feedback_cell->value().IsFeedbackVector()) {
- FeedbackVector::cast(feedback_cell->value())
- .EvictOptimizedCodeMarkedForDeoptimization(
- *info, "new function from shared function info");
- }
- result->set_raw_feedback_cell(*feedback_cell);
-
- // Give compiler a chance to pre-initialize.
- Compiler::PostInstantiation(result);
-
- return result;
-}
-
Handle<JSObject> Factory::NewExternal(void* value) {
Handle<Foreign> foreign = NewForeign(reinterpret_cast<Address>(value));
Handle<JSObject> external = NewJSObjectFromMap(external_map());
@@ -2063,8 +1877,13 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Builtins::CodeObjectIsExecutable(code->builtin_index());
Handle<Code> result = Builtins::GenerateOffHeapTrampolineFor(
isolate(), off_heap_entry,
- code->code_data_container().kind_specific_flags(),
+ code->code_data_container(kAcquireLoad).kind_specific_flags(),
generate_jump_to_instruction_stream);
+
+ // Trampolines may not contain any metadata since all metadata offsets,
+ // stored on the Code object, refer to the off-heap metadata area.
+ CHECK_EQ(result->raw_metadata_size(), 0);
+
// The CodeDataContainer should not be modified beyond this point since it's
// now possibly canonicalized.
@@ -2076,14 +1895,13 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
const bool set_is_off_heap_trampoline = true;
const int stack_slots =
code->has_safepoint_info() ? code->stack_slots() : 0;
- result->initialize_flags(code->kind(), code->has_unwinding_info(),
- code->is_turbofanned(), stack_slots,
+ result->initialize_flags(code->kind(), code->is_turbofanned(), stack_slots,
set_is_off_heap_trampoline);
result->set_builtin_index(code->builtin_index());
- result->set_safepoint_table_offset(code->safepoint_table_offset());
result->set_handler_table_offset(code->handler_table_offset());
result->set_constant_pool_offset(code->constant_pool_offset());
result->set_code_comments_offset(code->code_comments_offset());
+ result->set_unwinding_info_offset(code->unwinding_info_offset());
// Replace the newly generated trampoline's RelocInfo ByteArray with the
// canonical one stored in the roots to avoid duplicating it for every
@@ -2108,7 +1926,8 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Handle<Code> Factory::CopyCode(Handle<Code> code) {
Handle<CodeDataContainer> data_container = NewCodeDataContainer(
- code->code_data_container().kind_specific_flags(), AllocationType::kOld);
+ code->code_data_container(kAcquireLoad).kind_specific_flags(),
+ AllocationType::kOld);
Heap* heap = isolate()->heap();
Handle<Code> new_code;
@@ -2116,8 +1935,7 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
int obj_size = code->Size();
CodePageCollectionMemoryModificationScope code_allocation(heap);
HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
- obj_size, AllocationType::kCode, AllocationOrigin::kRuntime,
- AllocationAlignment::kCodeAligned);
+ obj_size, AllocationType::kCode, AllocationOrigin::kRuntime);
// Copy code object.
Address old_addr = code->address();
@@ -2126,7 +1944,7 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
new_code = handle(Code::cast(result), isolate());
// Set the {CodeDataContainer}, it cannot be shared.
- new_code->set_code_data_container(*data_container);
+ new_code->set_code_data_container(*data_container, kReleaseStore);
new_code->Relocate(new_addr - old_addr);
// We have to iterate over the object and process its pointers when black
@@ -2143,6 +1961,7 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
#endif
DCHECK(IsAligned(new_code->address(), kCodeAlignment));
DCHECK_IMPLIES(
+ !V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
!heap->memory_allocator()->code_range().is_empty(),
heap->memory_allocator()->code_range().contains(new_code->address()));
return new_code;
@@ -2162,8 +1981,8 @@ Handle<BytecodeArray> Factory::CopyBytecodeArray(
bytecode_array->incoming_new_target_or_generator_register());
copy->set_constant_pool(bytecode_array->constant_pool());
copy->set_handler_table(bytecode_array->handler_table());
- copy->set_synchronized_source_position_table(
- bytecode_array->synchronized_source_position_table());
+ copy->set_source_position_table(
+ bytecode_array->source_position_table(kAcquireLoad), kReleaseStore);
copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
copy->set_bytecode_age(bytecode_array->bytecode_age());
bytecode_array->CopyBytecodesTo(*copy);
@@ -2214,7 +2033,8 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
// The global object might be created from an object template with accessors.
// Fill these accessors into the dictionary.
- Handle<DescriptorArray> descs(map->instance_descriptors(), isolate());
+ Handle<DescriptorArray> descs(map->instance_descriptors(kRelaxedLoad),
+ isolate());
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
// Only accessors are expected.
@@ -2696,6 +2516,7 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
Handle<JSTypedArray> typed_array =
Handle<JSTypedArray>::cast(NewJSArrayBufferView(
map, empty_byte_array(), buffer, byte_offset, byte_length));
+ typed_array->AllocateExternalPointerEntries(isolate());
typed_array->set_length(length);
typed_array->SetOffHeapDataPtr(isolate(), buffer->backing_store(),
byte_offset);
@@ -2709,6 +2530,7 @@ Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
isolate());
Handle<JSDataView> obj = Handle<JSDataView>::cast(NewJSArrayBufferView(
map, empty_fixed_array(), buffer, byte_offset, byte_length));
+ obj->AllocateExternalPointerEntries(isolate());
obj->set_data_pointer(
isolate(), static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
return obj;
@@ -3054,8 +2876,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
debug_info->set_shared(*shared);
debug_info->set_debugger_hints(0);
DCHECK_EQ(DebugInfo::kNoDebuggingId, debug_info->debugging_id());
- DCHECK(!shared->HasDebugInfo());
- debug_info->set_script(shared->script_or_debug_info());
+ debug_info->set_script(shared->script_or_debug_info(kAcquireLoad));
debug_info->set_original_bytecode_array(
ReadOnlyRoots(heap).undefined_value());
debug_info->set_debug_bytecode_array(ReadOnlyRoots(heap).undefined_value());
@@ -3463,7 +3284,8 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
map->AppendDescriptor(isolate(), &d);
}
DCHECK_EQ(inobject_properties_count, field_index);
- DCHECK_EQ(0, map->instance_descriptors().number_of_slack_descriptors());
+ DCHECK_EQ(
+ 0, map->instance_descriptors(kRelaxedLoad).number_of_slack_descriptors());
LOG(isolate(), MapDetails(*map));
return map;
}
@@ -3546,7 +3368,8 @@ Handle<Map> Factory::CreateStrictFunctionMap(
map->AppendDescriptor(isolate(), &d);
}
DCHECK_EQ(inobject_properties_count, field_index);
- DCHECK_EQ(0, map->instance_descriptors().number_of_slack_descriptors());
+ DCHECK_EQ(
+ 0, map->instance_descriptors(kRelaxedLoad).number_of_slack_descriptors());
LOG(isolate(), MapDetails(*map));
return map;
}
@@ -3774,5 +3597,217 @@ Handle<Map> NewFunctionArgs::GetMap(Isolate* isolate) const {
UNREACHABLE();
}
+Handle<JSFunction> Factory::NewFunctionForTest(Handle<String> name) {
+ NewFunctionArgs args = NewFunctionArgs::ForFunctionWithoutCode(
+ name, isolate()->sloppy_function_map(), LanguageMode::kSloppy);
+ Handle<JSFunction> result = NewFunction(args);
+ DCHECK(is_sloppy(result->shared().language_mode()));
+ return result;
+}
+
+Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
+ DCHECK(!args.name_.is_null());
+
+ // Create the SharedFunctionInfo.
+ Handle<NativeContext> context(isolate()->native_context());
+ Handle<Map> map = args.GetMap(isolate());
+ Handle<SharedFunctionInfo> info =
+ NewSharedFunctionInfo(args.name_, args.maybe_wasm_function_data_,
+ args.maybe_builtin_id_, kNormalFunction);
+
+ // Proper language mode in shared function info will be set later.
+ DCHECK(is_sloppy(info->language_mode()));
+ DCHECK(!map->IsUndefined(isolate()));
+
+ if (args.should_set_language_mode_) {
+ info->set_language_mode(args.language_mode_);
+ }
+
+#ifdef DEBUG
+ if (isolate()->bootstrapper()->IsActive()) {
+ Handle<Code> code;
+ DCHECK(
+ // During bootstrapping some of these maps could be not created yet.
+ (*map == context->get(Context::STRICT_FUNCTION_MAP_INDEX)) ||
+ (*map ==
+ context->get(Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)) ||
+ (*map ==
+ context->get(
+ Context::STRICT_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX)) ||
+ // Check if it's a creation of an empty or Proxy function during
+ // bootstrapping.
+ (args.maybe_builtin_id_ == Builtins::kEmptyFunction ||
+ args.maybe_builtin_id_ == Builtins::kProxyConstructor));
+ }
+#endif
+
+ Handle<JSFunction> result =
+ JSFunctionBuilder{isolate(), info, context}.set_map(map).Build();
+
+ // Both of these write to `prototype_or_initial_map`.
+ // TODO(jgruber): Fix callsites and enable the DCHECK.
+ // DCHECK(!args.should_set_prototype_ ||
+ // !args.should_create_and_set_initial_map_);
+ if (args.should_set_prototype_) {
+ result->set_prototype_or_initial_map(
+ *args.maybe_prototype_.ToHandleChecked());
+ }
+
+ if (args.should_create_and_set_initial_map_) {
+ ElementsKind elements_kind;
+ switch (args.type_) {
+ case JS_ARRAY_TYPE:
+ elements_kind = PACKED_SMI_ELEMENTS;
+ break;
+ case JS_ARGUMENTS_OBJECT_TYPE:
+ elements_kind = PACKED_ELEMENTS;
+ break;
+ default:
+ elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
+ break;
+ }
+ Handle<Map> initial_map = NewMap(args.type_, args.instance_size_,
+ elements_kind, args.inobject_properties_);
+ result->shared().set_expected_nof_properties(args.inobject_properties_);
+ // TODO(littledan): Why do we have this is_generator test when
+ // NewFunctionPrototype already handles finding an appropriately
+ // shared prototype?
+ Handle<HeapObject> prototype = args.maybe_prototype_.ToHandleChecked();
+ if (!IsResumableFunction(result->shared().kind())) {
+ if (prototype->IsTheHole(isolate())) {
+ prototype = NewFunctionPrototype(result);
+ }
+ }
+ JSFunction::SetInitialMap(result, initial_map, prototype);
+ }
+
+ return result;
+}
+
+Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
+ Handle<SharedFunctionInfo> info,
+ Handle<Context> context,
+ AllocationType allocation) {
+ // TODO(jgruber): Remove this function.
+ return JSFunctionBuilder{isolate(), info, context}
+ .set_map(map)
+ .set_allocation_type(allocation)
+ .Build();
+}
+
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<SharedFunctionInfo> info, Handle<Context> context,
+ AllocationType allocation) {
+ // TODO(jgruber): Remove this function.
+ return JSFunctionBuilder{isolate(), info, context}
+ .set_allocation_type(allocation)
+ .Build();
+}
+
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<SharedFunctionInfo> info, Handle<Context> context,
+ Handle<FeedbackCell> feedback_cell, AllocationType allocation) {
+ // TODO(jgruber): Remove this function.
+ return JSFunctionBuilder{isolate(), info, context}
+ .set_feedback_cell(feedback_cell)
+ .Build();
+}
+
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
+ Handle<Context> context, AllocationType allocation) {
+ // TODO(jgruber): Remove this function.
+ return JSFunctionBuilder{isolate(), info, context}
+ .set_map(initial_map)
+ .set_allocation_type(allocation)
+ .Build();
+}
+
+Factory::JSFunctionBuilder::JSFunctionBuilder(Isolate* isolate,
+ Handle<SharedFunctionInfo> sfi,
+ Handle<Context> context)
+ : isolate_(isolate), sfi_(sfi), context_(context) {}
+
+Handle<JSFunction> Factory::JSFunctionBuilder::Build() {
+ PrepareMap();
+ PrepareFeedbackCell();
+
+ // Determine the associated Code object.
+ Handle<Code> code;
+ const bool have_cached_code =
+ sfi_->TryGetCachedCode(isolate_).ToHandle(&code);
+ if (!have_cached_code) code = handle(sfi_->GetCode(), isolate_);
+
+ Handle<JSFunction> result = BuildRaw(code);
+
+ if (have_cached_code) {
+ IsCompiledScope is_compiled_scope(sfi_->is_compiled_scope(isolate_));
+ JSFunction::EnsureFeedbackVector(result, &is_compiled_scope);
+ if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceHit(sfi_, code);
+ }
+
+ Compiler::PostInstantiation(result);
+ return result;
+}
+
+Handle<JSFunction> Factory::JSFunctionBuilder::BuildRaw(Handle<Code> code) {
+ Isolate* isolate = isolate_;
+ Factory* factory = isolate_->factory();
+
+ Handle<Map> map = maybe_map_.ToHandleChecked();
+ Handle<FeedbackCell> feedback_cell = maybe_feedback_cell_.ToHandleChecked();
+
+ DCHECK_EQ(JS_FUNCTION_TYPE, map->instance_type());
+
+ // Allocation.
+ Handle<JSFunction> function(
+ JSFunction::cast(factory->New(map, allocation_type_)), isolate);
+
+ // Header initialization.
+ function->initialize_properties(isolate);
+ function->initialize_elements();
+ function->set_shared(*sfi_);
+ function->set_context(*context_);
+ function->set_raw_feedback_cell(*feedback_cell);
+ function->set_code(*code);
+ if (map->has_prototype_slot()) {
+ function->set_prototype_or_initial_map(
+ ReadOnlyRoots(isolate).the_hole_value());
+ }
+
+ // Potentially body initialization.
+ factory->InitializeJSObjectBody(
+ function, map, JSFunction::GetHeaderSize(map->has_prototype_slot()));
+
+ return function;
+}
+
+void Factory::JSFunctionBuilder::PrepareMap() {
+ if (maybe_map_.is_null()) {
+ // No specific map requested, use the default.
+ maybe_map_ = handle(
+ Map::cast(context_->native_context().get(sfi_->function_map_index())),
+ isolate_);
+ }
+}
+
+void Factory::JSFunctionBuilder::PrepareFeedbackCell() {
+ Handle<FeedbackCell> feedback_cell;
+ if (maybe_feedback_cell_.ToHandle(&feedback_cell)) {
+ // Track the newly-created closure, and check that the optimized code in
+ // the feedback cell wasn't marked for deoptimization while not pointed to
+ // by any live JSFunction.
+ feedback_cell->IncrementClosureCount(isolate_);
+ if (feedback_cell->value().IsFeedbackVector()) {
+ FeedbackVector::cast(feedback_cell->value())
+ .EvictOptimizedCodeMarkedForDeoptimization(
+ *sfi_, "new function from shared function info");
+ }
+ } else {
+ // Fall back to the many_closures_cell.
+ maybe_feedback_cell_ = isolate_->factory()->many_closures_cell();
+ }
+}
+
} // namespace internal
} // namespace v8
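The factory refactoring above routes every JSFunction allocation through the new JSFunctionBuilder. A rough sketch of the fluent pattern, mirroring the call sites rewritten in this patch (only usable from inside Factory, since the builder is a nested helper; `info`, `context`, and `initial_map` are placeholders):

    Handle<JSFunction> function =
        JSFunctionBuilder{isolate(), info, context}
            .set_map(initial_map)                         // Optional: defaults to
            .set_allocation_type(AllocationType::kYoung)  // the SFI's map / kOld.
            .Build();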
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 05f271e3a2..350a400035 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -18,7 +18,9 @@
#include "src/objects/dictionary.h"
#include "src/objects/js-array.h"
#include "src/objects/js-regexp.h"
+#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
+#include "torque-generated/class-forward-declarations.h"
namespace v8 {
namespace internal {
@@ -69,6 +71,11 @@ class WasmCapiFunctionData;
class WasmExportedFunctionData;
class WasmJSFunctionData;
class WeakCell;
+
+namespace wasm {
+class ValueType;
+} // namespace wasm
+
enum class SharedFlag : uint8_t;
enum class InitializedFlag : uint8_t;
@@ -114,6 +121,10 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
#include "torque-generated/factory.inc"
+  // Prevent the Torque-generated factory function from shadowing the one from
+  // FactoryBase.
+ using FactoryBase::NewDescriptorArray;
+
Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number, const char* type_of,
byte kind);
@@ -157,10 +168,17 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<FrameArray> NewFrameArray(int number_of_frames);
+ // Allocates a |NameDictionary| with an internal capacity calculated such that
+ // |at_least_space_for| entries can be added without reallocating.
+ Handle<NameDictionary> NewNameDictionary(int at_least_space_for);
+
+ // Allocates an |OrderedNameDictionary| of the given capacity. This guarantees
+ // that |capacity| entries can be added without reallocating.
+ Handle<OrderedNameDictionary> NewOrderedNameDictionary(
+ int capacity = OrderedNameDictionary::kInitialCapacity);
+
Handle<OrderedHashSet> NewOrderedHashSet();
Handle<OrderedHashMap> NewOrderedHashMap();
- Handle<OrderedNameDictionary> NewOrderedNameDictionary();
-
Handle<SmallOrderedHashSet> NewSmallOrderedHashSet(
int capacity = kSmallOrderedHashSetMinCapacity,
AllocationType allocation = AllocationType::kYoung);
@@ -614,11 +632,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Function creation from SharedFunctionInfo.
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
- Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
- Handle<Context> context, Handle<FeedbackCell> feedback_cell,
- AllocationType allocation = AllocationType::kOld);
-
- Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info, Handle<Context> context,
Handle<FeedbackCell> feedback_cell,
AllocationType allocation = AllocationType::kOld);
@@ -792,6 +805,43 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return New(map, allocation);
}
+ // Helper class for creating JSFunction objects.
+ class JSFunctionBuilder final {
+ public:
+ JSFunctionBuilder(Isolate* isolate, Handle<SharedFunctionInfo> sfi,
+ Handle<Context> context);
+
+ V8_WARN_UNUSED_RESULT Handle<JSFunction> Build();
+
+ JSFunctionBuilder& set_map(Handle<Map> v) {
+ maybe_map_ = v;
+ return *this;
+ }
+ JSFunctionBuilder& set_allocation_type(AllocationType v) {
+ allocation_type_ = v;
+ return *this;
+ }
+ JSFunctionBuilder& set_feedback_cell(Handle<FeedbackCell> v) {
+ maybe_feedback_cell_ = v;
+ return *this;
+ }
+
+ private:
+ void PrepareMap();
+ void PrepareFeedbackCell();
+
+ V8_WARN_UNUSED_RESULT Handle<JSFunction> BuildRaw(Handle<Code> code);
+
+ Isolate* const isolate_;
+ Handle<SharedFunctionInfo> sfi_;
+ Handle<Context> context_;
+ MaybeHandle<Map> maybe_map_;
+ MaybeHandle<FeedbackCell> maybe_feedback_cell_;
+ AllocationType allocation_type_ = AllocationType::kOld;
+
+ friend class Factory;
+ };
+
// Allows creation of Code objects. It provides two build methods, one of
// which tries to gracefully handle allocation failure.
class V8_EXPORT_PRIVATE CodeBuilder final {
@@ -837,11 +887,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return *this;
}
- CodeBuilder& set_immovable() {
- is_movable_ = false;
- return *this;
- }
-
CodeBuilder& set_is_turbofanned() {
is_turbofanned_ = true;
return *this;
@@ -888,7 +933,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
BasicBlockProfilerData* profiler_data_ = nullptr;
bool is_executable_ = true;
bool read_only_data_container_ = false;
- bool is_movable_ = true;
bool is_turbofanned_ = false;
int stack_slots_ = 0;
};
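For the two dictionary helpers declared in this header, a short illustrative usage; the `factory` pointer is an assumption, not part of the patch:

    Handle<NameDictionary> dict =
        factory->NewNameDictionary(/*at_least_space_for=*/8);
    Handle<OrderedNameDictionary> ordered =
        factory->NewOrderedNameDictionary();  // OrderedNameDictionary::kInitialCapacity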
diff --git a/deps/v8/src/heap/free-list.cc b/deps/v8/src/heap/free-list.cc
index e9bf77d171..80b4a4f01f 100644
--- a/deps/v8/src/heap/free-list.cc
+++ b/deps/v8/src/heap/free-list.cc
@@ -418,50 +418,6 @@ FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes,
}
// ------------------------------------------------
-// FreeListMap implementation
-
-FreeListMap::FreeListMap() {
- // Initializing base (FreeList) fields
- number_of_categories_ = 1;
- last_category_ = kOnlyCategory;
- min_block_size_ = kMinBlockSize;
- categories_ = new FreeListCategory*[number_of_categories_]();
-
- Reset();
-}
-
-size_t FreeListMap::GuaranteedAllocatable(size_t maximum_freed) {
- return maximum_freed;
-}
-
-Page* FreeListMap::GetPageForSize(size_t size_in_bytes) {
- return GetPageForCategoryType(kOnlyCategory);
-}
-
-FreeListMap::~FreeListMap() { delete[] categories_; }
-
-FreeSpace FreeListMap::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
-
- // The following DCHECK ensures that maps are allocated one by one (ie,
- // without folding). This assumption currently holds. However, if it were to
- // become untrue in the future, you'll get an error here. To fix it, I would
- // suggest removing the DCHECK, and replacing TryFindNodeIn by
- // SearchForNodeInList below.
- DCHECK_EQ(size_in_bytes, Map::kSize);
-
- FreeSpace node = TryFindNodeIn(kOnlyCategory, size_in_bytes, node_size);
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK_IMPLIES(node.is_null(), IsEmpty());
- return node;
-}
-
-// ------------------------------------------------
// Generic FreeList methods (non alloc/free related)
void FreeList::Reset() {
diff --git a/deps/v8/src/heap/free-list.h b/deps/v8/src/heap/free-list.h
index 25bba59836..afa23e051a 100644
--- a/deps/v8/src/heap/free-list.h
+++ b/deps/v8/src/heap/free-list.h
@@ -488,31 +488,6 @@ class V8_EXPORT_PRIVATE FreeListManyCachedOrigin
AllocationOrigin origin) override;
};
-// FreeList for maps: since maps are all the same size, uses a single freelist.
-class V8_EXPORT_PRIVATE FreeListMap : public FreeList {
- public:
- size_t GuaranteedAllocatable(size_t maximum_freed) override;
-
- Page* GetPageForSize(size_t size_in_bytes) override;
-
- FreeListMap();
- ~FreeListMap() override;
-
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-
- private:
- static const size_t kMinBlockSize = Map::kSize;
- static const size_t kMaxBlockSize = MemoryChunk::kPageSize;
- static const FreeListCategoryType kOnlyCategory = 0;
-
- FreeListCategoryType SelectFreeListCategoryType(
- size_t size_in_bytes) override {
- return kOnlyCategory;
- }
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index cc10d92f33..0469748c4e 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -576,7 +576,6 @@ void GCTracer::PrintNVP() const {
"fast_promote=%.2f "
"complete.sweep_array_buffers=%.2f "
"scavenge=%.2f "
- "scavenge.process_array_buffers=%.2f "
"scavenge.free_remembered_set=%.2f "
"scavenge.roots=%.2f "
"scavenge.weak=%.2f "
@@ -617,10 +616,9 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::HEAP_EXTERNAL_PROLOGUE],
current_.scopes[Scope::HEAP_EXTERNAL_EPILOGUE],
current_.scopes[Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES],
- current_.scopes[Scope::SCAVENGER_SWEEP_ARRAY_BUFFERS],
current_.scopes[Scope::SCAVENGER_FAST_PROMOTE],
+ current_.scopes[Scope::SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS],
current_.scopes[Scope::SCAVENGER_SCAVENGE],
- current_.scopes[Scope::SCAVENGER_PROCESS_ARRAY_BUFFERS],
current_.scopes[Scope::SCAVENGER_FREE_REMEMBERED_SET],
current_.scopes[Scope::SCAVENGER_SCAVENGE_ROOTS],
current_.scopes[Scope::SCAVENGER_SCAVENGE_WEAK],
@@ -1229,22 +1227,27 @@ void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
heap_->isolate()->counters()->gc_marking_sum()->AddSample(
static_cast<int>(overall_marking_time));
+  // Filter out samples where
+  // - we don't have high-resolution timers;
+  // - the size of marked objects is very small;
+  // - the marking time is rounded to 0.
constexpr size_t kMinObjectSizeForReportingThroughput = 1024 * 1024;
if (base::TimeTicks::IsHighResolution() &&
- heap_->SizeOfObjects() > kMinObjectSizeForReportingThroughput) {
- DCHECK_GT(overall_marking_time, 0.0);
+ heap_->SizeOfObjects() > kMinObjectSizeForReportingThroughput &&
+ overall_marking_time > 0) {
const double overall_v8_marking_time =
overall_marking_time -
current_.scopes[Scope::MC_MARK_EMBEDDER_TRACING];
- DCHECK_GT(overall_v8_marking_time, 0.0);
- const int main_thread_marking_throughput_mb_per_s =
- static_cast<int>(static_cast<double>(heap_->SizeOfObjects()) /
- overall_v8_marking_time * 1000 / 1024 / 1024);
- heap_->isolate()
- ->counters()
- ->gc_main_thread_marking_throughput()
- ->AddSample(
- static_cast<int>(main_thread_marking_throughput_mb_per_s));
+ if (overall_v8_marking_time > 0) {
+ const int main_thread_marking_throughput_mb_per_s =
+ static_cast<int>(static_cast<double>(heap_->SizeOfObjects()) /
+ overall_v8_marking_time * 1000 / 1024 / 1024);
+ heap_->isolate()
+ ->counters()
+ ->gc_main_thread_marking_throughput()
+ ->AddSample(
+ static_cast<int>(main_thread_marking_throughput_mb_per_s));
+ }
}
DCHECK_EQ(Scope::LAST_TOP_MC_SCOPE, Scope::MC_SWEEP);
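For reference, the guarded throughput sample above works out as follows with illustrative numbers (not taken from the patch):

    // 256 MB of marked objects, 128 ms of V8 marking time (embedder time
    // already subtracted).
    const double size_of_objects = 256.0 * 1024 * 1024;  // bytes
    const double overall_v8_marking_time = 128.0;        // ms
    const int throughput_mb_per_s = static_cast<int>(
        size_of_objects / overall_v8_marking_time * 1000 / 1024 / 1024);
    // throughput_mb_per_s == 2000; the new `> 0` checks simply skip the
    // sample when low timer resolution rounds the marking time to zero.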
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index d4dc7e2b8c..fe4f2b18bc 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -171,8 +171,8 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(AllowGarbageCollection::IsAllowed());
- DCHECK_IMPLIES(type == AllocationType::kCode,
- alignment == AllocationAlignment::kCodeAligned);
+ DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
+ alignment == AllocationAlignment::kWordAligned);
DCHECK_EQ(gc_state(), NOT_IN_GC);
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
@@ -185,10 +185,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
IncrementObjectCounters();
#endif
- size_t large_object_threshold =
- AllocationType::kCode == type
- ? std::min(kMaxRegularHeapObjectSize, code_space()->AreaSize())
- : kMaxRegularHeapObjectSize;
+ size_t large_object_threshold = MaxRegularHeapObjectSize(type);
bool large_object =
static_cast<size_t>(size_in_bytes) > large_object_threshold;
@@ -223,6 +220,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
allocation = old_space_->AllocateRaw(size_in_bytes, alignment, origin);
}
} else if (AllocationType::kCode == type) {
+ DCHECK(AllowCodeAllocation::IsAllowed());
if (large_object) {
allocation = code_lo_space_->AllocateRaw(size_in_bytes);
} else {
@@ -231,7 +229,6 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
} else if (AllocationType::kMap == type) {
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
} else if (AllocationType::kReadOnly == type) {
- DCHECK(isolate_->serializer_enabled());
DCHECK(!large_object);
DCHECK(CanAllocateInReadOnlySpace());
DCHECK_EQ(AllocationOrigin::kRuntime, origin);
@@ -275,27 +272,24 @@ HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(AllowGarbageCollection::IsAllowed());
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- AllocationResult result = AllocateRaw(size, allocation, origin, alignment);
- DCHECK(!result.IsRetry());
- return result.ToObjectChecked();
- }
DCHECK_EQ(gc_state(), NOT_IN_GC);
Heap* heap = isolate()->heap();
- Address* top = heap->NewSpaceAllocationTopAddress();
- Address* limit = heap->NewSpaceAllocationLimitAddress();
- if (allocation == AllocationType::kYoung &&
+ if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
+ allocation == AllocationType::kYoung &&
alignment == AllocationAlignment::kWordAligned &&
- size <= kMaxRegularHeapObjectSize &&
- (*limit - *top >= static_cast<unsigned>(size)) &&
- V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
- FLAG_gc_interval == 0)) {
- DCHECK(IsAligned(size, kTaggedSize));
- HeapObject obj = HeapObject::FromAddress(*top);
- *top += size;
- heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size);
- return obj;
+ size <= MaxRegularHeapObjectSize(allocation)) {
+ Address* top = heap->NewSpaceAllocationTopAddress();
+ Address* limit = heap->NewSpaceAllocationLimitAddress();
+ if ((*limit - *top >= static_cast<unsigned>(size)) &&
+ V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
+ FLAG_gc_interval == 0)) {
+ DCHECK(IsAligned(size, kTaggedSize));
+ HeapObject obj = HeapObject::FromAddress(*top);
+ *top += size;
+ heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size);
+ return obj;
+ }
}
switch (mode) {
case kLightRetry:
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index 79265c4db6..6c5fccb551 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -191,6 +191,7 @@ inline bool ObjectInYoungGeneration(Object object) {
}
inline bool IsReadOnlyHeapObject(HeapObject object) {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return ReadOnlyHeap::Contains(object);
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
return chunk->InReadOnlySpace();
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 4506ed71aa..e818600d5b 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -35,6 +35,7 @@
#include "src/heap/base/stack.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/code-stats.h"
+#include "src/heap/collection-barrier.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/concurrent-marking.h"
@@ -195,7 +196,7 @@ Heap::Heap()
global_pretenuring_feedback_(kInitialFeedbackCapacity),
safepoint_(new GlobalSafepoint(this)),
external_string_table_(this),
- collection_barrier_(this) {
+ collection_barrier_(new CollectionBarrier(this)) {
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size() & (Page::kPageSize - 1));
@@ -395,7 +396,7 @@ size_t Heap::Available() {
}
bool Heap::CanExpandOldGeneration(size_t size) {
- if (force_oom_) return false;
+ if (force_oom_ || force_gc_on_next_allocation_) return false;
if (OldGenerationCapacity() + size > max_old_generation_size()) return false;
// The OldGenerationCapacity does not account compaction spaces used
// during evacuation. Ensure that expanding the old generation does push
@@ -1095,7 +1096,8 @@ void Heap::DeoptMarkedAllocationSites() {
void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
if (collector == MARK_COMPACTOR) {
- memory_pressure_level_ = MemoryPressureLevel::kNone;
+ memory_pressure_level_.store(MemoryPressureLevel::kNone,
+ std::memory_order_relaxed);
}
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_SAFEPOINT);
@@ -1151,6 +1153,9 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
ReduceNewSpaceSize();
}
+
+ // Resume all threads waiting for the GC.
+ collection_barrier_->ResumeThreadsAwaitingCollection();
}
void Heap::GarbageCollectionEpilogue() {
@@ -1212,6 +1217,8 @@ void Heap::HandleGCRequest() {
} else if (HighMemoryPressure()) {
incremental_marking()->reset_request_type();
CheckMemoryPressure();
+ } else if (CollectionRequested()) {
+ CheckCollectionRequested();
} else if (incremental_marking()->request_type() ==
IncrementalMarking::COMPLETE_MARKING) {
incremental_marking()->reset_request_type();
@@ -1502,16 +1509,14 @@ bool Heap::CollectGarbage(AllocationSpace space,
const char* collector_reason = nullptr;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
- current_gc_flags_ & kForcedGC;
+ current_gc_flags_ & kForcedGC ||
+ force_gc_on_next_allocation_;
+ if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
DevToolsTraceEventScope devtools_trace_event_scope(
this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
GarbageCollectionReasonToString(gc_reason));
- if (!CanPromoteYoungAndExpandOldGeneration(0)) {
- InvokeNearHeapLimitCallback();
- }
-
// Filter on-stack reference below this method.
isolate()
->global_handles()
@@ -1678,8 +1683,6 @@ bool Heap::CollectGarbage(AllocationSpace space,
isolate()->CountUsage(v8::Isolate::kForcedGC);
}
- collection_barrier_.CollectionPerformed();
-
// Start incremental marking for the next cycle. We do this only for scavenger
// to avoid a loop where mark-compact causes another mark-compact.
if (IsYoungGenerationCollector(collector)) {
@@ -1688,6 +1691,13 @@ bool Heap::CollectGarbage(AllocationSpace space,
kGCCallbackScheduleIdleGarbageCollection);
}
+ if (!CanExpandOldGeneration(0)) {
+ InvokeNearHeapLimitCallback();
+ if (!CanExpandOldGeneration(0)) {
+ FatalProcessOutOfMemory("Reached heap limit");
+ }
+ }
+
return freed_global_handles > 0;
}
@@ -1696,7 +1706,7 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
if (!dependant_context) {
tracer()->ResetSurvivalEvents();
old_generation_size_configured_ = false;
- old_generation_allocation_limit_ = initial_old_generation_size_;
+ set_old_generation_allocation_limit(initial_old_generation_size_);
MemoryReducer::Event event;
event.type = MemoryReducer::kPossibleGarbage;
event.time_ms = MonotonicallyIncreasingTimeInMs();
@@ -1878,125 +1888,6 @@ static void VerifyStringTable(Isolate* isolate) {
}
#endif // VERIFY_HEAP
-bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
- bool gc_performed = true;
- int counter = 0;
- static const int kThreshold = 20;
- while (gc_performed && counter++ < kThreshold) {
- gc_performed = false;
- for (int space = FIRST_SPACE;
- space < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces);
- space++) {
- DCHECK_NE(space, NEW_SPACE);
- DCHECK_NE(space, NEW_LO_SPACE);
- Reservation* reservation = &reservations[space];
- DCHECK_LE(1, reservation->size());
- if (reservation->at(0).size == 0) {
- DCHECK_EQ(1, reservation->size());
- continue;
- }
- bool perform_gc = false;
- if (space == MAP_SPACE) {
- // We allocate each map individually to avoid fragmentation.
- maps->clear();
- DCHECK_LE(reservation->size(), 2);
- int reserved_size = 0;
- for (const Chunk& c : *reservation) reserved_size += c.size;
- DCHECK_EQ(0, reserved_size % Map::kSize);
- int num_maps = reserved_size / Map::kSize;
- for (int i = 0; i < num_maps; i++) {
- AllocationResult allocation;
-#if V8_ENABLE_THIRD_PARTY_HEAP_BOOL
- allocation = AllocateRaw(Map::kSize, AllocationType::kMap,
- AllocationOrigin::kRuntime, kWordAligned);
-#else
- allocation = map_space()->AllocateRawUnaligned(Map::kSize);
-#endif
- HeapObject free_space;
- if (allocation.To(&free_space)) {
- // Mark with a free list node, in case we have a GC before
- // deserializing.
- Address free_space_address = free_space.address();
- CreateFillerObjectAt(free_space_address, Map::kSize,
- ClearRecordedSlots::kNo);
- maps->push_back(free_space_address);
- } else {
- perform_gc = true;
- break;
- }
- }
- } else if (space == LO_SPACE) {
- // Just check that we can allocate during deserialization.
- DCHECK_LE(reservation->size(), 2);
- int reserved_size = 0;
- for (const Chunk& c : *reservation) reserved_size += c.size;
- perform_gc = !CanExpandOldGeneration(reserved_size);
- } else {
- for (auto& chunk : *reservation) {
- AllocationResult allocation;
- int size = chunk.size;
- DCHECK_LE(static_cast<size_t>(size),
- MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
- static_cast<AllocationSpace>(space)));
-#if V8_ENABLE_THIRD_PARTY_HEAP_BOOL
- AllocationType type = (space == CODE_SPACE)
- ? AllocationType::kCode
- : (space == RO_SPACE)
- ? AllocationType::kReadOnly
- : AllocationType::kYoung;
- AllocationAlignment align =
- (space == CODE_SPACE) ? kCodeAligned : kWordAligned;
- allocation =
- AllocateRaw(size, type, AllocationOrigin::kRuntime, align);
-#else
- if (space == RO_SPACE) {
- allocation = read_only_space()->AllocateRaw(
- size, AllocationAlignment::kWordAligned);
- } else {
- // The deserializer will update the skip list.
- allocation = paged_space(space)->AllocateRawUnaligned(size);
- }
-#endif
- HeapObject free_space;
- if (allocation.To(&free_space)) {
- // Mark with a free list node, in case we have a GC before
- // deserializing.
- Address free_space_address = free_space.address();
- CreateFillerObjectAt(free_space_address, size,
- ClearRecordedSlots::kNo);
- DCHECK(IsPreAllocatedSpace(static_cast<SnapshotSpace>(space)));
- chunk.start = free_space_address;
- chunk.end = free_space_address + size;
- } else {
- perform_gc = true;
- break;
- }
- }
- }
- if (perform_gc) {
- // We cannot perfom a GC with an uninitialized isolate. This check
- // fails for example if the max old space size is chosen unwisely,
- // so that we cannot allocate space to deserialize the initial heap.
- if (!deserialization_complete_) {
- V8::FatalProcessOutOfMemory(
- isolate(), "insufficient memory to create an Isolate");
- }
- if (counter > 1) {
- CollectAllGarbage(kReduceMemoryFootprintMask,
- GarbageCollectionReason::kDeserializer);
- } else {
- CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kDeserializer);
- }
- gc_performed = true;
- break; // Abort for-loop over spaces and retry.
- }
- }
- }
-
- return !gc_performed;
-}
-
-
void Heap::EnsureFromSpaceIsCommitted() {
if (new_space_->CommitFromSpaceIfNeeded()) return;
@@ -2005,35 +1896,28 @@ void Heap::EnsureFromSpaceIsCommitted() {
FatalProcessOutOfMemory("Committing semi space failed.");
}
-void Heap::CollectionBarrier::CollectionPerformed() {
- base::MutexGuard guard(&mutex_);
- gc_requested_ = false;
- cond_.NotifyAll();
+bool Heap::CollectionRequested() {
+ return collection_barrier_->CollectionRequested();
}
-void Heap::CollectionBarrier::ShutdownRequested() {
- base::MutexGuard guard(&mutex_);
- shutdown_requested_ = true;
- cond_.NotifyAll();
+void Heap::RequestCollectionBackground(LocalHeap* local_heap) {
+ if (local_heap->is_main_thread()) {
+ CollectAllGarbage(current_gc_flags_,
+ GarbageCollectionReason::kBackgroundAllocationFailure,
+ current_gc_callback_flags_);
+ } else {
+ collection_barrier_->AwaitCollectionBackground();
+ }
}
-void Heap::CollectionBarrier::Wait() {
- base::MutexGuard guard(&mutex_);
-
- if (shutdown_requested_) return;
-
- if (!gc_requested_) {
- heap_->MemoryPressureNotification(MemoryPressureLevel::kCritical, false);
- gc_requested_ = true;
- }
+void Heap::CheckCollectionRequested() {
+ if (!collection_barrier_->CollectionRequested()) return;
- while (gc_requested_ && !shutdown_requested_) {
- cond_.Wait(&mutex_);
- }
+ CollectAllGarbage(current_gc_flags_,
+ GarbageCollectionReason::kBackgroundAllocationFailure,
+ current_gc_callback_flags_);
}
-void Heap::RequestAndWaitForCollection() { collection_barrier_.Wait(); }
-
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
if (start_new_space_size == 0) return;
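// Not part of the patch: a summary of the background-GC handshake that the
// hunks above and the local-heap.cc changes below introduce, using only names
// that appear in this diff.
//
//   background thread                          main thread
//   -----------------                          -----------
//   allocation fails, so
//   LocalHeap::PerformCollection():
//     ParkedScope parks the thread,
//     Heap::RequestCollectionBackground()
//       -> CollectionBarrier::
//          AwaitCollectionBackground()   ....  Heap::HandleGCRequest() sees
//          (blocks)                            CollectionRequested() and calls
//                                              CheckCollectionRequested(), i.e.
//                                              CollectAllGarbage(...,
//                                                kBackgroundAllocationFailure)
//   woken up by                          ....  GarbageCollectionEpilogueInSafepoint()
//   ResumeThreadsAwaitingCollection()          resumes waiting threads
//
// A main-thread LocalHeap skips the barrier and calls CollectAllGarbage()
// directly.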
@@ -2060,6 +1944,11 @@ size_t Heap::PerformGarbageCollection(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
DisallowJavascriptExecution no_js(isolate());
base::Optional<SafepointScope> optional_safepoint_scope;
+
+ // Stop time-to-collection timer before safepoint - we do not want to measure
+ // time for safepointing.
+ collection_barrier_->StopTimeToCollectionTimer();
+
if (FLAG_local_heaps) {
optional_safepoint_scope.emplace(this);
}
@@ -2182,11 +2071,11 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
if (collector == MARK_COMPACTOR) {
external_memory_.ResetAfterGC();
- old_generation_allocation_limit_ =
+ set_old_generation_allocation_limit(
MemoryController<V8HeapTrait>::CalculateAllocationLimit(
this, old_gen_size, min_old_generation_size_,
max_old_generation_size(), new_space_capacity, v8_growing_factor,
- mode);
+ mode));
if (UseGlobalMemoryScheduling()) {
DCHECK_GT(global_growing_factor, 0);
global_allocation_limit_ =
@@ -2204,8 +2093,8 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
this, old_gen_size, min_old_generation_size_,
max_old_generation_size(), new_space_capacity, v8_growing_factor,
mode);
- if (new_old_generation_limit < old_generation_allocation_limit_) {
- old_generation_allocation_limit_ = new_old_generation_limit;
+ if (new_old_generation_limit < old_generation_allocation_limit()) {
+ set_old_generation_allocation_limit(new_old_generation_limit);
}
if (UseGlobalMemoryScheduling()) {
DCHECK_GT(global_growing_factor, 0);
@@ -2912,11 +2801,11 @@ void Heap::ConfigureInitialOldGenerationSize() {
const size_t new_old_generation_allocation_limit =
Max(OldGenerationSizeOfObjects() + minimum_growing_step,
static_cast<size_t>(
- static_cast<double>(old_generation_allocation_limit_) *
+ static_cast<double>(old_generation_allocation_limit()) *
(tracer()->AverageSurvivalRatio() / 100)));
if (new_old_generation_allocation_limit <
- old_generation_allocation_limit_) {
- old_generation_allocation_limit_ = new_old_generation_allocation_limit;
+ old_generation_allocation_limit()) {
+ set_old_generation_allocation_limit(new_old_generation_allocation_limit);
} else {
old_generation_size_configured_ = true;
}
@@ -3088,6 +2977,7 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
namespace {
bool MayContainRecordedSlots(HeapObject object) {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return false;
// New space object do not have recorded slots.
if (BasicMemoryChunk::FromHeapObject(object)->InYoungGeneration())
return false;
@@ -3538,47 +3428,6 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
InvokeIncrementalMarkingEpilogueCallbacks();
}
-void Heap::RegisterDeserializedObjectsForBlackAllocation(
- Reservation* reservations, const std::vector<HeapObject>& large_objects,
- const std::vector<Address>& maps) {
- // TODO(ulan): pause black allocation during deserialization to avoid
- // iterating all these objects in one go.
-
- if (!incremental_marking()->black_allocation()) return;
-
- // Iterate black objects in old space, code space, map space, and large
- // object space for side effects.
- IncrementalMarking::MarkingState* marking_state =
- incremental_marking()->marking_state();
- for (int i = OLD_SPACE;
- i < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces); i++) {
- const Heap::Reservation& res = reservations[i];
- for (auto& chunk : res) {
- Address addr = chunk.start;
- while (addr < chunk.end) {
- HeapObject obj = HeapObject::FromAddress(addr);
- // Objects can have any color because incremental marking can
- // start in the middle of Heap::ReserveSpace().
- if (marking_state->IsBlack(obj)) {
- incremental_marking()->ProcessBlackAllocatedObject(obj);
- }
- addr += obj.Size();
- }
- }
- }
-
- // Large object space doesn't use reservations, so it needs custom handling.
- for (HeapObject object : large_objects) {
- incremental_marking()->ProcessBlackAllocatedObject(object);
- }
-
- // Map space doesn't use reservations, so it needs custom handling.
- for (Address addr : maps) {
- incremental_marking()->ProcessBlackAllocatedObject(
- HeapObject::FromAddress(addr));
- }
-}
-
void Heap::NotifyObjectLayoutChange(
HeapObject object, const DisallowHeapAllocation&,
InvalidateRecordedSlots invalidate_recorded_slots) {
@@ -3636,13 +3485,19 @@ class SlotCollectingVisitor final : public ObjectVisitor {
void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
if (!FLAG_verify_heap) return;
- // Check that Heap::NotifyObjectLayout was called for object transitions
+ // Check that Heap::NotifyObjectLayoutChange was called for object transitions
// that are not safe for concurrent marking.
// If you see this check triggering for a freshly allocated object,
// use object->set_map_after_allocation() to initialize its map.
if (pending_layout_change_object_.is_null()) {
if (object.IsJSObject()) {
DCHECK(!object.map().TransitionRequiresSynchronizationWithGC(new_map));
+ } else if (object.IsString() &&
+ (new_map == ReadOnlyRoots(this).thin_string_map() ||
+ new_map == ReadOnlyRoots(this).thin_one_byte_string_map())) {
+ // When transitioning a string to ThinString,
+ // Heap::NotifyObjectLayoutChange doesn't need to be invoked because only
+ // tagged fields are introduced.
} else {
// Check that the set of slots before and after the transition match.
SlotCollectingVisitor old_visitor;
@@ -3812,11 +3667,11 @@ void Heap::CheckMemoryPressure() {
// The optimizing compiler may be unnecessarily holding on to memory.
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
}
- MemoryPressureLevel memory_pressure_level = memory_pressure_level_;
// Reset the memory pressure level to avoid recursive GCs triggered by
// CheckMemoryPressure from AdjustAmountOfExternalMemory called by
// the finalizers.
- memory_pressure_level_ = MemoryPressureLevel::kNone;
+ MemoryPressureLevel memory_pressure_level = memory_pressure_level_.exchange(
+ MemoryPressureLevel::kNone, std::memory_order_relaxed);
if (memory_pressure_level == MemoryPressureLevel::kCritical) {
TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
CollectGarbageOnMemoryPressure();
@@ -3869,8 +3724,8 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
bool is_isolate_locked) {
TRACE_EVENT1("devtools.timeline,v8", "V8.MemoryPressureNotification", "level",
static_cast<int>(level));
- MemoryPressureLevel previous = memory_pressure_level_;
- memory_pressure_level_ = level;
+ MemoryPressureLevel previous =
+ memory_pressure_level_.exchange(level, std::memory_order_relaxed);
if ((previous != MemoryPressureLevel::kCritical &&
level == MemoryPressureLevel::kCritical) ||
(previous == MemoryPressureLevel::kNone &&
@@ -4048,6 +3903,8 @@ const char* Heap::GarbageCollectionReasonToString(
return "measure memory";
case GarbageCollectionReason::kUnknown:
return "unknown";
+ case GarbageCollectionReason::kBackgroundAllocationFailure:
+ return "background allocation failure";
}
UNREACHABLE();
}
@@ -4149,6 +4006,7 @@ void Heap::Verify() {
// We have to wait here for the sweeper threads to have an iterable heap.
mark_compact_collector()->EnsureSweepingCompleted();
+
array_buffer_sweeper()->EnsureFinished();
VerifyPointersVisitor visitor(this);
@@ -4160,6 +4018,12 @@ void Heap::Verify() {
.NormalizedMapCacheVerify(isolate());
}
+ // The heap verifier can't deal with partially deserialized objects, so
+ // disable it if a deserializer is active.
+ // TODO(leszeks): Enable verification during deserialization, e.g. by only
+ // blocklisting objects that are in a partially deserialized state.
+ if (isolate()->has_active_deserializer()) return;
+
VerifySmisVisitor smis_visitor;
IterateSmiRoots(&smis_visitor);
@@ -4450,11 +4314,12 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
inline void FixHandle(FullObjectSlot p) {
if (!(*p).IsHeapObject()) return;
HeapObject current = HeapObject::cast(*p);
- const MapWord map_word = current.map_word();
- if (!map_word.IsForwardingAddress() && current.IsFreeSpaceOrFiller()) {
+ if (!current.map_word().IsForwardingAddress() &&
+ current.IsFreeSpaceOrFiller()) {
#ifdef DEBUG
// We need to find a FixedArrayBase map after walking the fillers.
- while (current.IsFreeSpaceOrFiller()) {
+ while (!current.map_word().IsForwardingAddress() &&
+ current.IsFreeSpaceOrFiller()) {
Address next = current.ptr();
if (current.map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
next += kTaggedSize;
@@ -4466,7 +4331,8 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
}
current = HeapObject::cast(Object(next));
}
- DCHECK(current.IsFixedArrayBase());
+ DCHECK(current.map_word().IsForwardingAddress() ||
+ current.IsFixedArrayBase());
#endif // DEBUG
p.store(Smi::zero());
}
@@ -4770,9 +4636,9 @@ void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
FLAG_semi_space_growth_factor = 2;
}
- old_generation_allocation_limit_ = initial_old_generation_size_;
+ set_old_generation_allocation_limit(initial_old_generation_size_);
global_allocation_limit_ =
- GlobalMemorySizeFromV8Size(old_generation_allocation_limit_);
+ GlobalMemorySizeFromV8Size(old_generation_allocation_limit());
initial_max_old_generation_size_ = max_old_generation_size();
// We rely on being able to allocate new arrays in paged spaces.
@@ -4881,8 +4747,8 @@ bool Heap::AllocationLimitOvershotByLargeMargin() {
uint64_t size_now =
OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
- const size_t v8_overshoot = old_generation_allocation_limit_ < size_now
- ? size_now - old_generation_allocation_limit_
+ const size_t v8_overshoot = old_generation_allocation_limit() < size_now
+ ? size_now - old_generation_allocation_limit()
: 0;
const size_t global_overshoot =
global_allocation_limit_ < GlobalSizeOfObjects()
@@ -4898,8 +4764,8 @@ bool Heap::AllocationLimitOvershotByLargeMargin() {
// Overshoot margin is 50% of allocation limit or half-way to the max heap
// with special handling of small heaps.
const size_t v8_margin =
- Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
- (max_old_generation_size() - old_generation_allocation_limit_) / 2);
+ Min(Max(old_generation_allocation_limit() / 2, kMarginForSmallHeaps),
+ (max_old_generation_size() - old_generation_allocation_limit()) / 2);
const size_t global_margin =
Min(Max(global_allocation_limit_ / 2, kMarginForSmallHeaps),
(max_global_memory_size_ - global_allocation_limit_) / 2);
@@ -4907,6 +4773,15 @@ bool Heap::AllocationLimitOvershotByLargeMargin() {
return v8_overshoot >= v8_margin || global_overshoot >= global_margin;
}
+// static
+int Heap::MaxRegularHeapObjectSize(AllocationType allocation) {
+ if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
+ (allocation == AllocationType::kCode)) {
+ return MemoryChunkLayout::MaxRegularCodeObjectSize();
+ }
+ return kMaxRegularHeapObjectSize;
+}
+
bool Heap::ShouldOptimizeForLoadTime() {
return isolate()->rail_mode() == PERFORMANCE_LOAD &&
!AllocationLimitOvershotByLargeMargin() &&
@@ -4930,6 +4805,9 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
// Ensure that retry of allocation on background thread succeeds
if (IsRetryOfFailedAllocation(local_heap)) return true;
+ // Background thread requested GC, allocation should fail
+ if (CollectionRequested()) return false;
+
if (ShouldOptimizeForMemoryUsage()) return false;
if (ShouldOptimizeForLoadTime()) return true;
@@ -4983,7 +4861,7 @@ double Heap::PercentToOldGenerationLimit() {
double size_now =
OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
double current_bytes = size_now - size_at_gc;
- double total_bytes = old_generation_allocation_limit_ - size_at_gc;
+ double total_bytes = old_generation_allocation_limit() - size_at_gc;
return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
}
@@ -4992,7 +4870,7 @@ double Heap::PercentToGlobalMemoryLimit() {
double size_now =
OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
double current_bytes = size_now - size_at_gc;
- double total_bytes = old_generation_allocation_limit_ - size_at_gc;
+ double total_bytes = old_generation_allocation_limit() - size_at_gc;
return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
}
@@ -5113,40 +4991,20 @@ void Heap::DisableInlineAllocation() {
}
}
-HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
- // Code objects which should stay at a fixed address are allocated either
- // in the first page of code space, in large object space, or (during
- // snapshot creation) the containing page is marked as immovable.
- DCHECK(!heap_object.is_null());
-#ifndef V8_ENABLE_THIRD_PARTY_HEAP
- DCHECK(code_space_->Contains(heap_object));
-#endif
- DCHECK_GE(object_size, 0);
- if (!Heap::IsImmovable(heap_object)) {
- if (isolate()->serializer_enabled() ||
- code_space_->first_page()->Contains(heap_object.address())) {
- BasicMemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
- } else {
- // Discard the first code allocation, which was on a page where it could
- // be moved.
- CreateFillerObjectAt(heap_object.address(), object_size,
- ClearRecordedSlots::kNo);
- heap_object = AllocateRawCodeInLargeObjectSpace(object_size);
- UnprotectAndRegisterMemoryChunk(heap_object);
- ZapCodeObject(heap_object.address(), object_size);
- OnAllocationEvent(heap_object, object_size);
- }
- }
- return heap_object;
-}
-
HeapObject Heap::AllocateRawWithLightRetrySlowPath(
int size, AllocationType allocation, AllocationOrigin origin,
AllocationAlignment alignment) {
HeapObject result;
AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment);
if (alloc.To(&result)) {
- DCHECK(result != ReadOnlyRoots(this).exception());
+ // DCHECK that the successful allocation is not "exception". The one
+ // exception to this is when allocating the "exception" object itself, in
+ // which case this must be an ROSpace allocation and the exception object
+ // in the roots has to be unset.
+ DCHECK((CanAllocateInReadOnlySpace() &&
+ allocation == AllocationType::kReadOnly &&
+ ReadOnlyRoots(this).unchecked_exception() == Smi::zero()) ||
+ result != ReadOnlyRoots(this).exception());
return result;
}
// Two GCs before panicking. In newspace will almost always succeed.
@@ -5185,40 +5043,6 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
return HeapObject();
}
-// TODO(jkummerow): Refactor this. AllocateRaw should take an "immovability"
-// parameter and just do what's necessary.
-HeapObject Heap::AllocateRawCodeInLargeObjectSpace(int size) {
- AllocationResult alloc = code_lo_space()->AllocateRaw(size);
- HeapObject result;
- if (alloc.To(&result)) {
- DCHECK(result != ReadOnlyRoots(this).exception());
- return result;
- }
- // Two GCs before panicking.
- for (int i = 0; i < 2; i++) {
- CollectGarbage(alloc.RetrySpace(),
- GarbageCollectionReason::kAllocationFailure);
- alloc = code_lo_space()->AllocateRaw(size);
- if (alloc.To(&result)) {
- DCHECK(result != ReadOnlyRoots(this).exception());
- return result;
- }
- }
- isolate()->counters()->gc_last_resort_from_handles()->Increment();
- CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
- {
- AlwaysAllocateScope scope(this);
- alloc = code_lo_space()->AllocateRaw(size);
- }
- if (alloc.To(&result)) {
- DCHECK(result != ReadOnlyRoots(this).exception());
- return result;
- }
- // TODO(1181417): Fix this.
- FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
- return HeapObject();
-}
-
void Heap::SetUp() {
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
allocation_timeout_ = NextAllocationTimeout();
@@ -5513,7 +5337,7 @@ void Heap::StartTearDown() {
// process the event queue anymore. Avoid this deadlock by allowing all
// allocations after tear down was requested to make sure all background
// threads finish.
- collection_barrier_.ShutdownRequested();
+ collection_barrier_->ShutdownRequested();
#ifdef VERIFY_HEAP
// {StartTearDown} is called fairly early during Isolate teardown, so it's
@@ -5529,6 +5353,9 @@ void Heap::StartTearDown() {
void Heap::TearDown() {
DCHECK_EQ(gc_state(), TEAR_DOWN);
+ if (FLAG_concurrent_marking || FLAG_parallel_marking)
+ concurrent_marking_->Pause();
+
// It's too late for Heap::Verify() here, as parts of the Isolate are
// already gone by the time this is called.
@@ -6826,7 +6653,7 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
return true;
}
-void Heap::SetEmbedderStackStateForNextFinalizaton(
+void Heap::SetEmbedderStackStateForNextFinalization(
EmbedderHeapTracer::EmbedderStackState stack_state) {
local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
stack_state);
@@ -6839,5 +6666,41 @@ void Heap::IncrementObjectCounters() {
}
#endif // DEBUG
+// StrongRootBlocks are allocated as a block of addresses, prefixed with a
+// StrongRootsEntry pointer:
+//
+// | StrongRootsEntry*
+// | Address 1
+// | ...
+// | Address N
+//
+// The allocate method registers the range "Address 1" to "Address N" with the
+// heap as a strong root array, saves that entry in StrongRootsEntry*, and
+// returns a pointer to Address 1.
+Address* StrongRootBlockAllocator::allocate(size_t n) {
+ void* block = malloc(sizeof(StrongRootsEntry*) + n * sizeof(Address));
+
+ StrongRootsEntry** header = reinterpret_cast<StrongRootsEntry**>(block);
+ Address* ret = reinterpret_cast<Address*>(reinterpret_cast<char*>(block) +
+ sizeof(StrongRootsEntry*));
+
+ memset(ret, kNullAddress, n * sizeof(Address));
+ *header =
+ heap_->RegisterStrongRoots(FullObjectSlot(ret), FullObjectSlot(ret + n));
+
+ return ret;
+}
+
+void StrongRootBlockAllocator::deallocate(Address* p, size_t n) noexcept {
+ // The allocate method returns a pointer to Address 1, so the deallocate
+ // method has to offset that pointer back by sizeof(StrongRootsEntry*).
+ void* block = reinterpret_cast<char*>(p) - sizeof(StrongRootsEntry*);
+ StrongRootsEntry** header = reinterpret_cast<StrongRootsEntry**>(block);
+
+ heap_->UnregisterStrongRoots(*header);
+
+ free(block);
+}
+
} // namespace internal
} // namespace v8
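// Illustration only (not from the patch): on typical alignments, the block
// that allocate() builds with raw offsets above has the same shape as this
// hypothetical struct; allocate() hands out &slots[0], and deallocate() steps
// back sizeof(StrongRootsEntry*) bytes to recover the header.
struct StrongRootBlockLayoutSketch {
  StrongRootsEntry* header;  // filled with the result of RegisterStrongRoots()
  Address slots[1];          // really n slots, memset to kNullAddress
};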
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index b8220dad5e..18064ac731 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -66,6 +66,7 @@ class ArrayBufferCollector;
class ArrayBufferSweeper;
class BasicMemoryChunk;
class CodeLargeObjectSpace;
+class CollectionBarrier;
class ConcurrentMarking;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
@@ -149,10 +150,11 @@ enum class GarbageCollectionReason {
kTesting = 21,
kExternalFinalize = 22,
kGlobalAllocationLimit = 23,
- kMeasureMemory = 24
+ kMeasureMemory = 24,
+ kBackgroundAllocationFailure = 25,
// If you add new items here, then update the incremental_marking_reason,
// mark_compact_reason, and scavenge_reason counters in counters.h.
- // Also update src/tools/metrics/histograms/histograms.xml in chromium.
+ // Also update src/tools/metrics/histograms/enums.xml in chromium.
};
enum class YoungGenerationHandling {
@@ -542,7 +544,7 @@ class Heap {
bool IsImmovable(HeapObject object);
- static bool IsLargeObject(HeapObject object);
+ V8_EXPORT_PRIVATE static bool IsLargeObject(HeapObject object);
// This method supports the deserialization allocator. All allocations
// are word-aligned. The method should never fail to allocate since the
@@ -658,6 +660,7 @@ class Heap {
}
void SetGCState(HeapState state);
bool IsTearingDown() const { return gc_state() == TEAR_DOWN; }
+ bool force_oom() const { return force_oom_; }
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
@@ -666,10 +669,8 @@ class Heap {
template <FindMementoMode mode>
inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);
- // Returns false if not able to reserve.
- bool ReserveSpace(Reservation* reservations, std::vector<Address>* maps);
-
- void RequestAndWaitForCollection();
+ // Requests collection and blocks until GC is finished.
+ void RequestCollectionBackground(LocalHeap* local_heap);
//
// Support for the API.
@@ -770,9 +771,14 @@ class Heap {
V8_EXPORT_PRIVATE bool ShouldOptimizeForMemoryUsage();
bool HighMemoryPressure() {
- return memory_pressure_level_ != MemoryPressureLevel::kNone;
+ return memory_pressure_level_.load(std::memory_order_relaxed) !=
+ MemoryPressureLevel::kNone;
}
+ bool CollectionRequested();
+
+ void CheckCollectionRequested();
+
void RestoreHeapLimit(size_t heap_limit) {
// Do not set the limit lower than the live size + some slack.
size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
@@ -1061,10 +1067,6 @@ class Heap {
V8_EXPORT_PRIVATE void FinalizeIncrementalMarkingAtomically(
GarbageCollectionReason gc_reason);
- void RegisterDeserializedObjectsForBlackAllocation(
- Reservation* reservations, const std::vector<HeapObject>& large_objects,
- const std::vector<Address>& maps);
-
IncrementalMarking* incremental_marking() {
return incremental_marking_.get();
}
@@ -1126,7 +1128,7 @@ class Heap {
EmbedderHeapTracer* GetEmbedderHeapTracer() const;
void RegisterExternallyReferencedObject(Address* location);
- void SetEmbedderStackStateForNextFinalizaton(
+ V8_EXPORT_PRIVATE void SetEmbedderStackStateForNextFinalization(
EmbedderHeapTracer::EmbedderStackState stack_state);
EmbedderHeapTracer::TraceFlags flags_for_embedder_tracer() const;
@@ -1365,6 +1367,14 @@ class Heap {
// more eager to finalize incremental marking.
bool AllocationLimitOvershotByLargeMargin();
+ // Return the maximum size objects can be before having to allocate them as
+ // large objects. This takes into account allocating in the code space for
+ // which the size of the allocatable space per V8 page may depend on the OS
+ // page size at runtime. You may use kMaxRegularHeapObjectSize as a constant
+ // instead if you know the allocation isn't in the code spaces.
+ V8_EXPORT_PRIVATE static int MaxRegularHeapObjectSize(
+ AllocationType allocation);
+
// ===========================================================================
// Prologue/epilogue callback methods.========================================
// ===========================================================================
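  // A caller-side sketch (not from the patch), mirroring the heap-inl.h hunk
  // earlier in this diff; `type` and `size_in_bytes` are assumed inputs:
  //
  //   size_t threshold =
  //       static_cast<size_t>(Heap::MaxRegularHeapObjectSize(type));
  //   bool large_object = static_cast<size_t>(size_in_bytes) > threshold;
  //
  // Only AllocationType::kCode (without a third-party heap) gets the smaller
  // MemoryChunkLayout::MaxRegularCodeObjectSize() threshold; all other types
  // keep the kMaxRegularHeapObjectSize constant.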
@@ -1574,22 +1584,6 @@ class Heap {
DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
};
- class CollectionBarrier {
- Heap* heap_;
- base::Mutex mutex_;
- base::ConditionVariable cond_;
- bool gc_requested_;
- bool shutdown_requested_;
-
- public:
- explicit CollectionBarrier(Heap* heap)
- : heap_(heap), gc_requested_(false), shutdown_requested_(false) {}
-
- void CollectionPerformed();
- void ShutdownRequested();
- void Wait();
- };
-
struct StringTypeTable {
InstanceType type;
int size;
@@ -1851,8 +1845,8 @@ class Heap {
uint64_t bytes = OldGenerationSizeOfObjects() +
AllocatedExternalMemorySinceMarkCompact();
- if (old_generation_allocation_limit_ <= bytes) return 0;
- return old_generation_allocation_limit_ - static_cast<size_t>(bytes);
+ if (old_generation_allocation_limit() <= bytes) return 0;
+ return old_generation_allocation_limit() - static_cast<size_t>(bytes);
}
void UpdateTotalGCTime(double duration);
@@ -1885,7 +1879,11 @@ class Heap {
bool ShouldOptimizeForLoadTime();
size_t old_generation_allocation_limit() const {
- return old_generation_allocation_limit_;
+ return old_generation_allocation_limit_.load(std::memory_order_relaxed);
+ }
+
+ void set_old_generation_allocation_limit(size_t newlimit) {
+ old_generation_allocation_limit_.store(newlimit, std::memory_order_relaxed);
}
size_t global_allocation_limit() const { return global_allocation_limit_; }
@@ -1984,17 +1982,10 @@ class Heap {
int size, AllocationType allocation, AllocationOrigin origin,
AllocationAlignment alignment = kWordAligned);
- V8_WARN_UNUSED_RESULT HeapObject AllocateRawCodeInLargeObjectSpace(int size);
-
// Allocates a heap object based on the map.
V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map,
AllocationType allocation);
- // Takes a code object and checks if it is on memory which is not subject to
- // compaction. This method will return a new code object on an immovable
- // memory location if the original code object was movable.
- HeapObject EnsureImmovableCode(HeapObject heap_object, int object_size);
-
// Allocates a partial map for bootstrapping.
V8_WARN_UNUSED_RESULT AllocationResult
AllocatePartialMap(InstanceType instance_type, int instance_size);
@@ -2002,6 +1993,9 @@ class Heap {
void FinalizePartialMap(Map map);
void set_force_oom(bool value) { force_oom_ = value; }
+ void set_force_gc_on_next_allocation() {
+ force_gc_on_next_allocation_ = true;
+ }
// ===========================================================================
// Retaining path tracing ====================================================
@@ -2072,7 +2066,7 @@ class Heap {
// and reset by a mark-compact garbage collection.
std::atomic<MemoryPressureLevel> memory_pressure_level_;
- std::vector<std::pair<v8::NearHeapLimitCallback, void*> >
+ std::vector<std::pair<v8::NearHeapLimitCallback, void*>>
near_heap_limit_callbacks_;
// For keeping track of context disposals.
@@ -2149,7 +2143,7 @@ class Heap {
// is checked when we have already decided to do a GC to help determine
// which collector to invoke, before expanding a paged space in the old
// generation and on every allocation in large object space.
- size_t old_generation_allocation_limit_ = 0;
+ std::atomic<size_t> old_generation_allocation_limit_{0};
size_t global_allocation_limit_ = 0;
// Indicates that inline bump-pointer allocation has been globally disabled
@@ -2275,7 +2269,7 @@ class Heap {
base::Mutex relocation_mutex_;
- CollectionBarrier collection_barrier_;
+ std::unique_ptr<CollectionBarrier> collection_barrier_;
int gc_callbacks_depth_ = 0;
@@ -2285,6 +2279,7 @@ class Heap {
// Used for testing purposes.
bool force_oom_ = false;
+ bool force_gc_on_next_allocation_ = false;
bool delay_sweeper_tasks_for_testing_ = false;
HeapObject pending_layout_change_object_;
@@ -2347,6 +2342,7 @@ class Heap {
// The allocator interface.
friend class Factory;
+ friend class Deserializer;
// The Isolate constructs us.
friend class Isolate;
@@ -2602,6 +2598,32 @@ T ForwardingAddress(T heap_obj) {
}
}
+// Address block allocator compatible with standard containers which registers
+// its allocated range as strong roots.
+class StrongRootBlockAllocator {
+ public:
+ using pointer = Address*;
+ using const_pointer = const Address*;
+ using reference = Address&;
+ using const_reference = const Address&;
+ using value_type = Address;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+ template <class U>
+ struct rebind {
+ STATIC_ASSERT((std::is_same<Address, U>::value));
+ using other = StrongRootBlockAllocator;
+ };
+
+ explicit StrongRootBlockAllocator(Heap* heap) : heap_(heap) {}
+
+ Address* allocate(size_t n);
+ void deallocate(Address* p, size_t n) noexcept;
+
+ private:
+ Heap* heap_;
+};
+
} // namespace internal
} // namespace v8
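// A minimal usage sketch (not from the patch): the typedefs above make the
// allocator usable with standard containers of Address values, so every slot
// of the container's backing store is visited by the GC as a strong root.
// `heap` is an assumed Heap* that outlives the container, and `tagged_ptr` an
// assumed tagged object address.
void StrongRootVectorSketch(Heap* heap, Address tagged_ptr) {
  StrongRootBlockAllocator alloc(heap);
  std::vector<Address, StrongRootBlockAllocator> roots(alloc);
  roots.push_back(tagged_ptr);  // kept alive: this slot is now a strong root
}  // ~vector -> deallocate() -> Heap::UnregisterStrongRoots() -> free()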
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index c5206adf81..fb0ee2ecab 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -246,7 +246,7 @@ void IncrementalMarking::StartMarking() {
MarkRoots();
if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
- heap_->concurrent_marking()->ScheduleTasks();
+ heap_->concurrent_marking()->ScheduleJob();
}
// Ready to start incremental marking.
@@ -501,109 +501,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
}
});
- UpdateWeakReferencesAfterScavenge();
-}
-
-void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
- weak_objects_->weak_references.Update(
- [](std::pair<HeapObject, HeapObjectSlot> slot_in,
- std::pair<HeapObject, HeapObjectSlot>* slot_out) -> bool {
- HeapObject heap_obj = slot_in.first;
- HeapObject forwarded = ForwardingAddress(heap_obj);
-
- if (!forwarded.is_null()) {
- ptrdiff_t distance_to_slot =
- slot_in.second.address() - slot_in.first.ptr();
- Address new_slot = forwarded.ptr() + distance_to_slot;
- slot_out->first = forwarded;
- slot_out->second = HeapObjectSlot(new_slot);
- return true;
- }
-
- return false;
- });
- weak_objects_->weak_objects_in_code.Update(
- [](std::pair<HeapObject, Code> slot_in,
- std::pair<HeapObject, Code>* slot_out) -> bool {
- HeapObject heap_obj = slot_in.first;
- HeapObject forwarded = ForwardingAddress(heap_obj);
-
- if (!forwarded.is_null()) {
- slot_out->first = forwarded;
- slot_out->second = slot_in.second;
- return true;
- }
-
- return false;
- });
- weak_objects_->ephemeron_hash_tables.Update(
- [](EphemeronHashTable slot_in, EphemeronHashTable* slot_out) -> bool {
- EphemeronHashTable forwarded = ForwardingAddress(slot_in);
-
- if (!forwarded.is_null()) {
- *slot_out = forwarded;
- return true;
- }
-
- return false;
- });
-
- auto ephemeron_updater = [](Ephemeron slot_in, Ephemeron* slot_out) -> bool {
- HeapObject key = slot_in.key;
- HeapObject value = slot_in.value;
- HeapObject forwarded_key = ForwardingAddress(key);
- HeapObject forwarded_value = ForwardingAddress(value);
-
- if (!forwarded_key.is_null() && !forwarded_value.is_null()) {
- *slot_out = Ephemeron{forwarded_key, forwarded_value};
- return true;
- }
-
- return false;
- };
-
- weak_objects_->current_ephemerons.Update(ephemeron_updater);
- weak_objects_->next_ephemerons.Update(ephemeron_updater);
- weak_objects_->discovered_ephemerons.Update(ephemeron_updater);
-
- weak_objects_->flushed_js_functions.Update(
- [](JSFunction slot_in, JSFunction* slot_out) -> bool {
- JSFunction forwarded = ForwardingAddress(slot_in);
-
- if (!forwarded.is_null()) {
- *slot_out = forwarded;
- return true;
- }
-
- return false;
- });
-#ifdef DEBUG
- weak_objects_->bytecode_flushing_candidates.Iterate(
- [](SharedFunctionInfo candidate) {
- DCHECK(!Heap::InYoungGeneration(candidate));
- });
-#endif
-
- if (FLAG_harmony_weak_refs) {
- weak_objects_->js_weak_refs.Update(
- [](JSWeakRef js_weak_ref_in, JSWeakRef* js_weak_ref_out) -> bool {
- JSWeakRef forwarded = ForwardingAddress(js_weak_ref_in);
-
- if (!forwarded.is_null()) {
- *js_weak_ref_out = forwarded;
- return true;
- }
-
- return false;
- });
-
-#ifdef DEBUG
- // TODO(syg, marja): Support WeakCells in the young generation.
- weak_objects_->weak_cells.Iterate([](WeakCell weak_cell) {
- DCHECK(!Heap::InYoungGeneration(weak_cell));
- });
-#endif
- }
+ weak_objects_->UpdateAfterScavenge();
}
void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
@@ -1104,7 +1002,7 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
}
if (FLAG_concurrent_marking) {
local_marking_worklists()->ShareWork();
- heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+ heap_->concurrent_marking()->RescheduleJobIfNeeded();
}
}
if (state_ == MARKING) {
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 29df137711..b259cacb93 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -81,7 +81,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
static constexpr size_t kGlobalActivationThreshold = 0;
#endif
-#ifdef V8_CONCURRENT_MARKING
+#ifdef V8_ATOMIC_MARKING_STATE
static const AccessMode kAtomicity = AccessMode::ATOMIC;
#else
static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
@@ -146,7 +146,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
void FinalizeIncrementally();
void UpdateMarkingWorklistAfterScavenge();
- void UpdateWeakReferencesAfterScavenge();
void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);
void Hurry();
diff --git a/deps/v8/src/heap/local-heap-inl.h b/deps/v8/src/heap/local-heap-inl.h
index 770e1cb8e9..89f35ec21f 100644
--- a/deps/v8/src/heap/local-heap-inl.h
+++ b/deps/v8/src/heap/local-heap-inl.h
@@ -20,13 +20,13 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(AllowGarbageCollection::IsAllowed());
- DCHECK_IMPLIES(type == AllocationType::kCode,
- alignment == AllocationAlignment::kCodeAligned);
+ DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
+ alignment == AllocationAlignment::kWordAligned);
Heap::HeapState state = heap()->gc_state();
DCHECK(state == Heap::TEAR_DOWN || state == Heap::NOT_IN_GC);
#endif
- bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
+ bool large_object = size_in_bytes > Heap::MaxRegularHeapObjectSize(type);
CHECK_EQ(type, AllocationType::kOld);
if (large_object)
diff --git a/deps/v8/src/heap/local-heap.cc b/deps/v8/src/heap/local-heap.cc
index a17c22a6d2..b54df4aae1 100644
--- a/deps/v8/src/heap/local-heap.cc
+++ b/deps/v8/src/heap/local-heap.cc
@@ -24,10 +24,11 @@ thread_local LocalHeap* current_local_heap = nullptr;
LocalHeap* LocalHeap::Current() { return current_local_heap; }
-LocalHeap::LocalHeap(Heap* heap,
+LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
std::unique_ptr<PersistentHandles> persistent_handles)
: heap_(heap),
- state_(ThreadState::Running),
+ is_main_thread_(kind == ThreadKind::kMain),
+ state_(ThreadState::Parked),
safepoint_requested_(false),
allocation_failed_(false),
prev_(nullptr),
@@ -36,34 +37,35 @@ LocalHeap::LocalHeap(Heap* heap,
persistent_handles_(std::move(persistent_handles)),
marking_barrier_(new MarkingBarrier(this)),
old_space_allocator_(this, heap->old_space()) {
- heap_->safepoint()->AddLocalHeap(this);
+ heap_->safepoint()->AddLocalHeap(this, [this] {
+ if (FLAG_local_heaps) {
+ WriteBarrier::SetForThread(marking_barrier_.get());
+ if (heap_->incremental_marking()->IsMarking()) {
+ marking_barrier_->Activate(
+ heap_->incremental_marking()->IsCompacting());
+ }
+ }
+ });
+
if (persistent_handles_) {
persistent_handles_->Attach(this);
}
DCHECK_NULL(current_local_heap);
current_local_heap = this;
- // TODO(ulan): Ensure that LocalHeap cannot be created without --local-heaps.
- if (FLAG_local_heaps) {
- WriteBarrier::SetForThread(marking_barrier_.get());
- if (heap_->incremental_marking()->IsMarking()) {
- marking_barrier_->Activate(heap_->incremental_marking()->IsCompacting());
- }
- }
}
LocalHeap::~LocalHeap() {
- // TODO(ulan): Ensure that LocalHeap cannot be created without --local-heaps.
- if (FLAG_local_heaps) {
- marking_barrier_->Publish();
- WriteBarrier::ClearForThread(marking_barrier_.get());
- }
- // Give up LAB before parking thread
- old_space_allocator_.FreeLinearAllocationArea();
-
// Park thread since removing the local heap could block.
EnsureParkedBeforeDestruction();
- heap_->safepoint()->RemoveLocalHeap(this);
+ heap_->safepoint()->RemoveLocalHeap(this, [this] {
+ old_space_allocator_.FreeLinearAllocationArea();
+
+ if (FLAG_local_heaps) {
+ marking_barrier_->Publish();
+ WriteBarrier::ClearForThread(marking_barrier_.get());
+ }
+ });
DCHECK_EQ(current_local_heap, this);
current_local_heap = nullptr;
@@ -77,6 +79,13 @@ void LocalHeap::EnsurePersistentHandles() {
}
}
+void LocalHeap::AttachPersistentHandles(
+ std::unique_ptr<PersistentHandles> persistent_handles) {
+ DCHECK_NULL(persistent_handles_);
+ persistent_handles_ = std::move(persistent_handles);
+ persistent_handles_->Attach(this);
+}
+
std::unique_ptr<PersistentHandles> LocalHeap::DetachPersistentHandles() {
if (persistent_handles_) persistent_handles_->Detach();
return std::move(persistent_handles_);
@@ -116,6 +125,7 @@ void LocalHeap::Unpark() {
}
void LocalHeap::EnsureParkedBeforeDestruction() {
+ if (IsParked()) return;
base::MutexGuard guard(&state_mutex_);
state_ = ThreadState::Parked;
state_change_.NotifyAll();
@@ -150,6 +160,11 @@ void LocalHeap::UnmarkLinearAllocationArea() {
old_space_allocator_.UnmarkLinearAllocationArea();
}
+void LocalHeap::PerformCollection() {
+ ParkedScope scope(this);
+ heap_->RequestCollectionBackground(this);
+}
+
Address LocalHeap::PerformCollectionAndAllocateAgain(
int object_size, AllocationType type, AllocationOrigin origin,
AllocationAlignment alignment) {
@@ -157,10 +172,7 @@ Address LocalHeap::PerformCollectionAndAllocateAgain(
static const int kMaxNumberOfRetries = 3;
for (int i = 0; i < kMaxNumberOfRetries; i++) {
- {
- ParkedScope scope(this);
- heap_->RequestAndWaitForCollection();
- }
+ PerformCollection();
AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
if (!result.IsRetry()) {
diff --git a/deps/v8/src/heap/local-heap.h b/deps/v8/src/heap/local-heap.h
index f6244aaefe..bd2a14760c 100644
--- a/deps/v8/src/heap/local-heap.h
+++ b/deps/v8/src/heap/local-heap.h
@@ -22,10 +22,19 @@ class Heap;
class Safepoint;
class LocalHandles;
+// LocalHeap is used by the GC to track all threads with heap access in order to
+// stop them before performing a collection. LocalHeaps can be either Parked or
+// Running and are in Parked mode when initialized.
+// Running: Thread is allowed to access the heap but needs to give the GC the
+// chance to run regularly by manually invoking Safepoint(). The
+// thread can be parked using ParkedScope.
+// Parked: Heap access is not allowed, so the GC will not stop this thread
+// for a collection. Useful when threads do not need heap access for
+// some time or for blocking operations like locking a mutex.
class V8_EXPORT_PRIVATE LocalHeap {
public:
explicit LocalHeap(
- Heap* heap,
+ Heap* heap, ThreadKind kind,
std::unique_ptr<PersistentHandles> persistent_handles = nullptr);
~LocalHeap();
@@ -70,6 +79,8 @@ class V8_EXPORT_PRIVATE LocalHeap {
return kNullMaybeHandle;
}
+ void AttachPersistentHandles(
+ std::unique_ptr<PersistentHandles> persistent_handles);
std::unique_ptr<PersistentHandles> DetachPersistentHandles();
#ifdef DEBUG
bool ContainsPersistentHandle(Address* location);
@@ -115,6 +126,11 @@ class V8_EXPORT_PRIVATE LocalHeap {
AllocationOrigin origin = AllocationOrigin::kRuntime,
AllocationAlignment alignment = kWordAligned);
+ bool is_main_thread() const { return is_main_thread_; }
+
+ // Requests GC and blocks until the collection finishes.
+ void PerformCollection();
+
private:
enum class ThreadState {
// Threads in this state need to be stopped in a safepoint.
@@ -147,6 +163,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
void EnterSafepoint();
Heap* heap_;
+ bool is_main_thread_;
base::Mutex state_mutex_;
base::ConditionVariable state_change_;
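// A minimal sketch (not from the patch) of the discipline the class comment
// above describes, for a background thread that already holds a Running
// LocalHeap. DoBlockingWork() is a hypothetical stand-in for any operation
// that must not hold up the GC (e.g. waiting on a mutex or on I/O).
void DoBlockingWork();  // assumed to exist elsewhere

void BackgroundStepSketch(LocalHeap* local_heap) {
  local_heap->Safepoint();           // let a requested GC stop this thread
  // ... heap access is allowed here while Running ...
  {
    ParkedScope parked(local_heap);  // Parked: the GC will not wait for us
    DoBlockingWork();
  }
  // Running again; keep calling Safepoint() regularly.
}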
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index c49bad62cc..a9db17f2aa 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -135,8 +135,8 @@ void MainMarkingVisitor<MarkingState>::MarkDescriptorArrayFromWriteBarrier(
}
template <LiveObjectIterationMode mode>
-LiveObjectRange<mode>::iterator::iterator(MemoryChunk* chunk, Bitmap* bitmap,
- Address start)
+LiveObjectRange<mode>::iterator::iterator(const MemoryChunk* chunk,
+ Bitmap* bitmap, Address start)
: chunk_(chunk),
one_word_filler_map_(
ReadOnlyRoots(chunk->heap()).one_pointer_filler_map()),
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 6e00912e61..91a1902182 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -6,6 +6,7 @@
#include <unordered_map>
+#include "src/base/optional.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/compilation-cache.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -19,6 +20,7 @@
#include "src/heap/code-object-registry.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking-inl.h"
+#include "src/heap/index-generator.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/large-spaces.h"
@@ -31,6 +33,7 @@
#include "src/heap/memory-measurement.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/parallel-work-item.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/safepoint.h"
@@ -116,30 +119,28 @@ void MarkingVerifier::VerifyRoots() {
void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
Address end) {
- HeapObject object;
Address next_object_must_be_here_or_later = start;
- for (Address current = start; current < end;) {
- object = HeapObject::FromAddress(current);
- // One word fillers at the end of a black area can be grey.
- if (IsBlackOrGrey(object) &&
- object.map() != ReadOnlyRoots(heap_).one_pointer_filler_map()) {
- CHECK(IsMarked(object));
- CHECK(current >= next_object_must_be_here_or_later);
- object.Iterate(this);
- next_object_must_be_here_or_later = current + object.Size();
- // The object is either part of a black area of black allocation or a
- // regular black object
- CHECK(
- bitmap(page)->AllBitsSetInRange(
+
+ for (auto object_and_size :
+ LiveObjectRange<kAllLiveObjects>(page, bitmap(page))) {
+ HeapObject object = object_and_size.first;
+ size_t size = object_and_size.second;
+ Address current = object.address();
+ if (current < start) continue;
+ if (current >= end) break;
+ CHECK(IsMarked(object));
+ CHECK(current >= next_object_must_be_here_or_later);
+ object.Iterate(this);
+ next_object_must_be_here_or_later = current + size;
+ // The object is either part of a black area of black allocation or a
+ // regular black object
+ CHECK(bitmap(page)->AllBitsSetInRange(
page->AddressToMarkbitIndex(current),
page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
bitmap(page)->AllBitsClearInRange(
page->AddressToMarkbitIndex(current + kTaggedSize * 2),
page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
- current = next_object_must_be_here_or_later;
- } else {
- current += kTaggedSize;
- }
+ current = next_object_must_be_here_or_later;
}
}
@@ -390,11 +391,8 @@ int NumberOfAvailableCores() {
} // namespace
-int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
- DCHECK_GT(pages, 0);
- int tasks = FLAG_parallel_compaction ? Min(NumberOfAvailableCores(),
- pages / (MB / Page::kPageSize) + 1)
- : 1;
+int MarkCompactCollectorBase::NumberOfParallelCompactionTasks() {
+ int tasks = FLAG_parallel_compaction ? NumberOfAvailableCores() : 1;
if (!heap_->CanPromoteYoungAndExpandOldGeneration(
static_cast<size_t>(tasks * Page::kPageSize))) {
// Optimize for memory usage near the heap limit.
@@ -403,30 +401,6 @@ int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
return tasks;
}
-int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
- int slots) {
- DCHECK_GT(pages, 0);
- // Limit the number of update tasks as task creation often dominates the
- // actual work that is being done.
- const int kMaxPointerUpdateTasks = 8;
- const int kSlotsPerTask = 600;
- const int wanted_tasks =
- (slots >= 0) ? Max(1, Min(pages, slots / kSlotsPerTask)) : pages;
- return FLAG_parallel_pointer_update
- ? Min(kMaxPointerUpdateTasks,
- Min(NumberOfAvailableCores(), wanted_tasks))
- : 1;
-}
-
-int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
- int pages) {
- DCHECK_GT(pages, 0);
- // No cap needed because all pages we need to process are fully filled with
- // interesting objects.
- return FLAG_parallel_pointer_update ? Min(NumberOfAvailableCores(), pages)
- : 1;
-}
-
MarkCompactCollector::MarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap),
page_parallel_job_semaphore_(0),
@@ -906,12 +880,11 @@ void MarkCompactCollector::Prepare() {
heap()->new_space()->original_top_acquire());
}
-void MarkCompactCollector::FinishConcurrentMarking(
- ConcurrentMarking::StopRequest stop_request) {
+void MarkCompactCollector::FinishConcurrentMarking() {
// FinishConcurrentMarking is called for both, concurrent and parallel,
// marking. It is safe to call this function when tasks are already finished.
if (FLAG_parallel_marking || FLAG_concurrent_marking) {
- heap()->concurrent_marking()->Stop(stop_request);
+ heap()->concurrent_marking()->Join();
heap()->concurrent_marking()->FlushMemoryChunkData(
non_atomic_marking_state());
heap()->concurrent_marking()->FlushNativeContexts(&native_context_stats_);
@@ -1665,12 +1638,12 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
if (FLAG_parallel_marking) {
- heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+ heap_->concurrent_marking()->RescheduleJobIfNeeded(
+ TaskPriority::kUserBlocking);
}
work_to_do = ProcessEphemerons();
- FinishConcurrentMarking(
- ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
+ FinishConcurrentMarking();
}
CHECK(weak_objects_.current_ephemerons.IsEmpty());
@@ -1985,12 +1958,12 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
if (FLAG_parallel_marking) {
- heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+ heap_->concurrent_marking()->RescheduleJobIfNeeded(
+ TaskPriority::kUserBlocking);
}
DrainMarkingWorklist();
- FinishConcurrentMarking(
- ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
+ FinishConcurrentMarking();
DrainMarkingWorklist();
}
@@ -2173,8 +2146,8 @@ void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
DCHECK_EQ(map.raw_transitions(), HeapObjectReference::Weak(dead_target));
// Take ownership of the descriptor array.
int number_of_own_descriptors = map.NumberOfOwnDescriptors();
- DescriptorArray descriptors = map.instance_descriptors();
- if (descriptors == dead_target.instance_descriptors() &&
+ DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
+ if (descriptors == dead_target.instance_descriptors(kRelaxedLoad) &&
number_of_own_descriptors > 0) {
TrimDescriptorArray(map, descriptors);
DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
@@ -2244,7 +2217,7 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
// Use the raw function data setter to avoid validity checks, since we're
// performing the unusual task of decompiling.
- shared_info.set_function_data(uncompiled_data);
+ shared_info.set_function_data(uncompiled_data, kReleaseStore);
DCHECK(!shared_info.is_compiled());
}
@@ -2292,11 +2265,19 @@ void MarkCompactCollector::ClearFullMapTransitions() {
// filled. Allow it.
if (array.GetTargetIfExists(0, isolate(), &map)) {
DCHECK(!map.is_null()); // Weak pointers aren't cleared yet.
+ Object constructor_or_backpointer = map.constructor_or_backpointer();
+ if (constructor_or_backpointer.IsSmi()) {
+ DCHECK(isolate()->has_active_deserializer());
+ DCHECK_EQ(constructor_or_backpointer,
+ Deserializer::uninitialized_field_value());
+ continue;
+ }
Map parent = Map::cast(map.constructor_or_backpointer());
bool parent_is_alive =
non_atomic_marking_state()->IsBlackOrGrey(parent);
DescriptorArray descriptors =
- parent_is_alive ? parent.instance_descriptors() : DescriptorArray();
+ parent_is_alive ? parent.instance_descriptors(kRelaxedLoad)
+ : DescriptorArray();
bool descriptors_owner_died =
CompactTransitionArray(parent, array, descriptors);
if (descriptors_owner_died) {
@@ -2320,7 +2301,7 @@ bool MarkCompactCollector::CompactTransitionArray(Map map,
DCHECK_EQ(target.constructor_or_backpointer(), map);
if (non_atomic_marking_state()->IsWhite(target)) {
if (!descriptors.is_null() &&
- target.instance_descriptors() == descriptors) {
+ target.instance_descriptors(kRelaxedLoad) == descriptors) {
DCHECK(!target.is_prototype_map());
descriptors_owner_died = true;
}
@@ -2394,7 +2375,7 @@ void MarkCompactCollector::TrimDescriptorArray(Map map,
descriptors.Sort();
if (FLAG_unbox_double_fields) {
- LayoutDescriptor layout_descriptor = map.layout_descriptor();
+ LayoutDescriptor layout_descriptor = map.layout_descriptor(kAcquireLoad);
layout_descriptor = layout_descriptor.Trim(heap_, map, descriptors,
number_of_own_descriptors);
SLOW_DCHECK(layout_descriptor.IsConsistentWithMap(map, true));
@@ -2702,8 +2683,7 @@ static inline SlotCallbackResult UpdateSlot(TSlot slot,
}
template <AccessMode access_mode, typename TSlot>
-static inline SlotCallbackResult UpdateSlot(const Isolate* isolate,
- TSlot slot) {
+static inline SlotCallbackResult UpdateSlot(IsolateRoot isolate, TSlot slot) {
typename TSlot::TObject obj = slot.Relaxed_Load(isolate);
HeapObject heap_obj;
if (TSlot::kCanBeWeak && obj->GetHeapObjectIfWeak(&heap_obj)) {
@@ -2716,7 +2696,7 @@ static inline SlotCallbackResult UpdateSlot(const Isolate* isolate,
}
template <AccessMode access_mode, typename TSlot>
-static inline SlotCallbackResult UpdateStrongSlot(const Isolate* isolate,
+static inline SlotCallbackResult UpdateStrongSlot(IsolateRoot isolate,
TSlot slot) {
typename TSlot::TObject obj = slot.Relaxed_Load(isolate);
DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(obj.ptr()));
@@ -2734,8 +2714,7 @@ static inline SlotCallbackResult UpdateStrongSlot(const Isolate* isolate,
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
- explicit PointersUpdatingVisitor(const Isolate* isolate)
- : isolate_(isolate) {}
+ explicit PointersUpdatingVisitor(IsolateRoot isolate) : isolate_(isolate) {}
void VisitPointer(HeapObject host, ObjectSlot p) override {
UpdateStrongSlotInternal(isolate_, p);
@@ -2790,32 +2769,32 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
}
private:
- static inline SlotCallbackResult UpdateRootSlotInternal(
- const Isolate* isolate, FullObjectSlot slot) {
+ static inline SlotCallbackResult UpdateRootSlotInternal(IsolateRoot isolate,
+ FullObjectSlot slot) {
return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
}
static inline SlotCallbackResult UpdateRootSlotInternal(
- const Isolate* isolate, OffHeapObjectSlot slot) {
+ IsolateRoot isolate, OffHeapObjectSlot slot) {
return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
}
static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
- const Isolate* isolate, MaybeObjectSlot slot) {
+ IsolateRoot isolate, MaybeObjectSlot slot) {
return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
}
- static inline SlotCallbackResult UpdateStrongSlotInternal(
- const Isolate* isolate, ObjectSlot slot) {
+ static inline SlotCallbackResult UpdateStrongSlotInternal(IsolateRoot isolate,
+ ObjectSlot slot) {
return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
}
- static inline SlotCallbackResult UpdateSlotInternal(const Isolate* isolate,
+ static inline SlotCallbackResult UpdateSlotInternal(IsolateRoot isolate,
MaybeObjectSlot slot) {
return UpdateSlot<AccessMode::NON_ATOMIC>(isolate, slot);
}
- const Isolate* isolate_;
+ IsolateRoot isolate_;
};
static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
@@ -2953,7 +2932,7 @@ class Evacuator : public Malloced {
// Merge back locally cached info sequentially. Note that this method needs
// to be called from the main thread.
- inline void Finalize();
+ virtual void Finalize();
virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0;
virtual GCTracer::Scope::ScopeId GetTracingScope() = 0;
@@ -3052,7 +3031,7 @@ class FullEvacuator : public Evacuator {
return GCTracer::Scope::MC_EVACUATE_COPY_PARALLEL;
}
- inline void Finalize() {
+ void Finalize() override {
Evacuator::Finalize();
for (auto it = ephemeron_remembered_set_.begin();
@@ -3121,48 +3100,68 @@ void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
}
}
-class EvacuationItem : public ItemParallelJob::Item {
- public:
- explicit EvacuationItem(MemoryChunk* chunk) : chunk_(chunk) {}
- ~EvacuationItem() override = default;
- MemoryChunk* chunk() const { return chunk_; }
-
- private:
- MemoryChunk* chunk_;
-};
-
-class PageEvacuationTask : public ItemParallelJob::Task {
+class PageEvacuationJob : public v8::JobTask {
public:
- PageEvacuationTask(Isolate* isolate, Evacuator* evacuator)
- : ItemParallelJob::Task(isolate),
- evacuator_(evacuator),
+ PageEvacuationJob(
+ Isolate* isolate, std::vector<std::unique_ptr<Evacuator>>* evacuators,
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items)
+ : evacuators_(evacuators),
+ evacuation_items_(std::move(evacuation_items)),
+ remaining_evacuation_items_(evacuation_items_.size()),
+ generator_(evacuation_items_.size()),
tracer_(isolate->heap()->tracer()) {}
- void RunInParallel(Runner runner) override {
- if (runner == Runner::kForeground) {
- TRACE_GC(tracer_, evacuator_->GetTracingScope());
- ProcessItems();
+ void Run(JobDelegate* delegate) override {
+ Evacuator* evacuator = (*evacuators_)[delegate->GetTaskId()].get();
+ if (delegate->IsJoiningThread()) {
+ TRACE_GC(tracer_, evacuator->GetTracingScope());
+ ProcessItems(delegate, evacuator);
} else {
- TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
- ProcessItems();
+ TRACE_BACKGROUND_GC(tracer_, evacuator->GetBackgroundTracingScope());
+ ProcessItems(delegate, evacuator);
+ }
+ }
+
+ void ProcessItems(JobDelegate* delegate, Evacuator* evacuator) {
+ while (remaining_evacuation_items_.load(std::memory_order_relaxed) > 0) {
+ base::Optional<size_t> index = generator_.GetNext();
+ if (!index) return;
+ for (size_t i = *index; i < evacuation_items_.size(); ++i) {
+ auto& work_item = evacuation_items_[i];
+ if (!work_item.first.TryAcquire()) break;
+ evacuator->EvacuatePage(work_item.second);
+ if (remaining_evacuation_items_.fetch_sub(
+ 1, std::memory_order_relaxed) <= 1) {
+ return;
+ }
+ }
}
}
- private:
- void ProcessItems() {
- EvacuationItem* item = nullptr;
- while ((item = GetItem<EvacuationItem>()) != nullptr) {
- evacuator_->EvacuatePage(item->chunk());
- item->MarkFinished();
- }
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ const size_t kItemsPerWorker = MB / Page::kPageSize;
+ // Ceiling division to ensure enough workers for all
+ // |remaining_evacuation_items_|
+ const size_t wanted_num_workers =
+ (remaining_evacuation_items_.load(std::memory_order_relaxed) +
+ kItemsPerWorker - 1) /
+ kItemsPerWorker;
+ return std::min<size_t>(wanted_num_workers, evacuators_->size());
}
- Evacuator* evacuator_;
+
+ private:
+ std::vector<std::unique_ptr<Evacuator>>* evacuators_;
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items_;
+ std::atomic<size_t> remaining_evacuation_items_{0};
+ IndexGenerator generator_;
+
GCTracer* tracer_;
};
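The new PageEvacuationJob replaces per-task item queues with a flat vector of work items: each worker picks a start index, claims items with ParallelWorkItem::TryAcquire, and exits once the shared counter of remaining items reaches zero. Below is a minimal standalone sketch of that acquire-and-count shape using only the standard library; the WorkItem struct, the fixed stride handed out in place of IndexGenerator, and the two worker threads are illustrative assumptions, not the actual V8 classes.

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

// Illustrative stand-in for ParallelWorkItem: a one-shot claim flag plus a
// payload (the real code pairs the flag with a MemoryChunk*).
struct WorkItem {
  bool TryAcquire() {
    return !acquired.exchange(true, std::memory_order_relaxed);
  }
  std::atomic<bool> acquired{false};
  int payload = 0;
};

int main() {
  std::vector<WorkItem> items(16);
  for (int i = 0; i < 16; ++i) items[i].payload = i;
  std::atomic<size_t> remaining{items.size()};
  std::atomic<size_t> next_start{0};  // crude stand-in for IndexGenerator

  auto worker = [&] {
    while (remaining.load(std::memory_order_relaxed) > 0) {
      // Pick a starting index, then scan forward until an item is already
      // claimed by another worker or all work is done.
      size_t start = next_start.fetch_add(4, std::memory_order_relaxed);
      if (start >= items.size()) return;
      for (size_t i = start; i < items.size(); ++i) {
        if (!items[i].TryAcquire()) break;
        std::printf("evacuated item %d\n", items[i].payload);
        if (remaining.fetch_sub(1, std::memory_order_relaxed) <= 1) return;
      }
    }
  };

  std::thread a(worker), b(worker);
  a.join();
  b.join();
}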
template <class Evacuator, class Collector>
void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
- Collector* collector, ItemParallelJob* job,
+ Collector* collector,
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
MigrationObserver* migration_observer, const intptr_t live_bytes) {
// Used for trace summary.
double compaction_speed = 0;
@@ -3173,31 +3172,33 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
const bool profiling = isolate()->LogObjectRelocation();
ProfilingMigrationObserver profiling_observer(heap());
- const int wanted_num_tasks =
- NumberOfParallelCompactionTasks(job->NumberOfItems());
- Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
+ const size_t pages_count = evacuation_items.size();
+ std::vector<std::unique_ptr<v8::internal::Evacuator>> evacuators;
+ const int wanted_num_tasks = NumberOfParallelCompactionTasks();
for (int i = 0; i < wanted_num_tasks; i++) {
- evacuators[i] = new Evacuator(collector);
- if (profiling) evacuators[i]->AddObserver(&profiling_observer);
+ auto evacuator = std::make_unique<Evacuator>(collector);
+ if (profiling) evacuator->AddObserver(&profiling_observer);
if (migration_observer != nullptr)
- evacuators[i]->AddObserver(migration_observer);
- job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
+ evacuator->AddObserver(migration_observer);
+ evacuators.push_back(std::move(evacuator));
}
- job->Run();
- for (int i = 0; i < wanted_num_tasks; i++) {
- evacuators[i]->Finalize();
- delete evacuators[i];
- }
- delete[] evacuators;
+ V8::GetCurrentPlatform()
+ ->PostJob(v8::TaskPriority::kUserBlocking,
+ std::make_unique<PageEvacuationJob>(
+ isolate(), &evacuators, std::move(evacuation_items)))
+ ->Join();
+
+ for (auto& evacuator : evacuators) evacuator->Finalize();
+ evacuators.clear();
if (FLAG_trace_evacuation) {
PrintIsolate(isolate(),
- "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
- "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8PRIdPTR
+ "%8.0f ms: evacuation-summary: parallel=%s pages=%zu "
+ "wanted_tasks=%d cores=%d live_bytes=%" V8PRIdPTR
" compaction_speed=%.f\n",
isolate()->time_millis_since_init(),
- FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
- wanted_num_tasks, job->NumberOfTasks(),
+ FLAG_parallel_compaction ? "yes" : "no", pages_count,
+ wanted_num_tasks,
V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
live_bytes, compaction_speed);
}
@@ -3214,8 +3215,7 @@ bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes,
}
void MarkCompactCollector::EvacuatePagesInParallel() {
- ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items;
intptr_t live_bytes = 0;
// Evacuation of new space pages cannot be aborted, so it needs to run
@@ -3238,12 +3238,12 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
- evacuation_job.AddItem(new EvacuationItem(page));
+ evacuation_items.emplace_back(ParallelWorkItem{}, page);
}
for (Page* page : old_space_evacuation_pages_) {
live_bytes += non_atomic_marking_state()->live_bytes(page);
- evacuation_job.AddItem(new EvacuationItem(page));
+ evacuation_items.emplace_back(ParallelWorkItem{}, page);
}
// Promote young generation large objects.
@@ -3259,18 +3259,18 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
if (marking_state->IsBlack(object)) {
heap_->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
- evacuation_job.AddItem(new EvacuationItem(current));
+ evacuation_items.emplace_back(ParallelWorkItem{}, current);
}
}
- if (evacuation_job.NumberOfItems() == 0) return;
+ if (evacuation_items.empty()) return;
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"MarkCompactCollector::EvacuatePagesInParallel", "pages",
- evacuation_job.NumberOfItems());
+ evacuation_items.size());
- CreateAndExecuteEvacuationTasks<FullEvacuator>(this, &evacuation_job, nullptr,
- live_bytes);
+ CreateAndExecuteEvacuationTasks<FullEvacuator>(
+ this, std::move(evacuation_items), nullptr, live_bytes);
// After evacuation there might still be swept pages that weren't
// added to one of the compaction space but still reside in the
@@ -3465,40 +3465,75 @@ void MarkCompactCollector::Evacuate() {
#endif
}
-class UpdatingItem : public ItemParallelJob::Item {
+class UpdatingItem : public ParallelWorkItem {
public:
- ~UpdatingItem() override = default;
+ virtual ~UpdatingItem() = default;
virtual void Process() = 0;
};
-class PointersUpdatingTask : public ItemParallelJob::Task {
+class PointersUpdatingJob : public v8::JobTask {
public:
- explicit PointersUpdatingTask(
- Isolate* isolate, GCTracer::Scope::ScopeId scope,
+ explicit PointersUpdatingJob(
+ Isolate* isolate,
+ std::vector<std::unique_ptr<UpdatingItem>> updating_items, int slots,
+ GCTracer::Scope::ScopeId scope,
GCTracer::BackgroundScope::ScopeId background_scope)
- : ItemParallelJob::Task(isolate),
+ : updating_items_(std::move(updating_items)),
+ remaining_updating_items_(updating_items_.size()),
+ generator_(updating_items_.size()),
+ slots_(slots),
tracer_(isolate->heap()->tracer()),
scope_(scope),
background_scope_(background_scope) {}
- void RunInParallel(Runner runner) override {
- if (runner == Runner::kForeground) {
+ void Run(JobDelegate* delegate) override {
+ if (delegate->IsJoiningThread()) {
TRACE_GC(tracer_, scope_);
- UpdatePointers();
+ UpdatePointers(delegate);
} else {
TRACE_BACKGROUND_GC(tracer_, background_scope_);
- UpdatePointers();
+ UpdatePointers(delegate);
}
}
- private:
- void UpdatePointers() {
- UpdatingItem* item = nullptr;
- while ((item = GetItem<UpdatingItem>()) != nullptr) {
- item->Process();
- item->MarkFinished();
+ void UpdatePointers(JobDelegate* delegate) {
+ while (remaining_updating_items_.load(std::memory_order_relaxed) > 0) {
+ base::Optional<size_t> index = generator_.GetNext();
+ if (!index) return;
+ for (size_t i = *index; i < updating_items_.size(); ++i) {
+ auto& work_item = updating_items_[i];
+ if (!work_item->TryAcquire()) break;
+ work_item->Process();
+ if (remaining_updating_items_.fetch_sub(1, std::memory_order_relaxed) <=
+ 1) {
+ return;
+ }
+ }
}
}
+
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ size_t items = remaining_updating_items_.load(std::memory_order_relaxed);
+ if (!FLAG_parallel_pointer_update) return items > 0;
+ const size_t kMaxPointerUpdateTasks = 8;
+ const size_t kSlotsPerTask = 600;
+ size_t wanted_tasks = items;
+ // Limit the number of update tasks as task creation often dominates the
+ // actual work that is being done.
+ if (slots_ >= 0) {
+ // Round up to ensure enough workers for all items.
+ wanted_tasks =
+ std::min<size_t>(items, (slots_ + kSlotsPerTask - 1) / kSlotsPerTask);
+ }
+ return std::min<size_t>(kMaxPointerUpdateTasks, wanted_tasks);
+ }
+
+ private:
+ std::vector<std::unique_ptr<UpdatingItem>> updating_items_;
+ std::atomic<size_t> remaining_updating_items_{0};
+ IndexGenerator generator_;
+ const int slots_;
+
GCTracer* tracer_;
GCTracer::Scope::ScopeId scope_;
GCTracer::BackgroundScope::ScopeId background_scope_;
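PointersUpdatingJob folds the old NumberOfParallelPointerUpdateTasks heuristic into GetMaxConcurrency: roughly one worker per 600 recorded slots, never more workers than remaining items, capped at eight. A standalone restatement of that arithmetic with illustrative inputs follows; the function name and the example numbers are assumptions, only the constants come from the patch.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Mirrors the heuristic in PointersUpdatingJob::GetMaxConcurrency: one worker
// per ~600 slots, never more workers than remaining items, capped at 8.
size_t WantedWorkers(size_t items, int slots, bool parallel_pointer_update) {
  if (!parallel_pointer_update) return items > 0;  // one worker, or none
  const size_t kMaxPointerUpdateTasks = 8;
  const size_t kSlotsPerTask = 600;
  size_t wanted_tasks = items;
  if (slots >= 0) {
    // Round up so a partial batch of slots still gets a worker.
    wanted_tasks = std::min<size_t>(
        items, (slots + kSlotsPerTask - 1) / kSlotsPerTask);
  }
  return std::min<size_t>(kMaxPointerUpdateTasks, wanted_tasks);
}

int main() {
  // 40 remaining items, 2500 recorded slots:
  // ceil(2500 / 600) = 5, min(40, 5) = 5, min(8, 5) = 5 workers.
  std::printf("%zu\n", WantedWorkers(40, 2500, true));   // 5
  std::printf("%zu\n", WantedWorkers(40, 10000, true));  // 8, hits the cap
  std::printf("%zu\n", WantedWorkers(3, -1, true));      // 3, slot count unknown
}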
@@ -3692,7 +3727,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
(chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(chunk_);
- const Isolate* isolate = heap_->isolate();
+ IsolateRoot isolate = heap_->isolate();
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
[&filter, isolate](MaybeObjectSlot slot) {
@@ -3732,7 +3767,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
Address slot) {
// Using UpdateStrongSlot is OK here, because there are no weak
// typed slots.
- const Isolate* isolate = heap_->isolate();
+ IsolateRoot isolate = heap_->isolate();
return UpdateTypedSlotHelper::UpdateTypedSlot(
heap_, slot_type, slot, [isolate](FullMaybeObjectSlot slot) {
return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
@@ -3747,20 +3782,22 @@ class RememberedSetUpdatingItem : public UpdatingItem {
RememberedSetUpdatingMode updating_mode_;
};
-UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
+std::unique_ptr<UpdatingItem> MarkCompactCollector::CreateToSpaceUpdatingItem(
MemoryChunk* chunk, Address start, Address end) {
- return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
+ return std::make_unique<ToSpaceUpdatingItem<NonAtomicMarkingState>>(
chunk, start, end, non_atomic_marking_state());
}
-UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
+std::unique_ptr<UpdatingItem>
+MarkCompactCollector::CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
- return new RememberedSetUpdatingItem<NonAtomicMarkingState, MARK_COMPACTOR>(
+ return std::make_unique<
+ RememberedSetUpdatingItem<NonAtomicMarkingState, MARK_COMPACTOR>>(
heap(), non_atomic_marking_state(), chunk, updating_mode);
}
int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
- ItemParallelJob* job) {
+ std::vector<std::unique_ptr<UpdatingItem>>* items) {
// Seed to space pages.
const Address space_start = heap()->new_space()->first_allocatable_address();
const Address space_end = heap()->new_space()->top();
@@ -3769,16 +3806,15 @@ int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
Address start =
page->Contains(space_start) ? space_start : page->area_start();
Address end = page->Contains(space_end) ? space_end : page->area_end();
- job->AddItem(CreateToSpaceUpdatingItem(page, start, end));
+ items->emplace_back(CreateToSpaceUpdatingItem(page, start, end));
pages++;
}
- if (pages == 0) return 0;
- return NumberOfParallelToSpacePointerUpdateTasks(pages);
+ return pages;
}
template <typename IterateableSpace>
int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
- ItemParallelJob* job, IterateableSpace* space,
+ std::vector<std::unique_ptr<UpdatingItem>>* items, IterateableSpace* space,
RememberedSetUpdatingMode mode) {
int pages = 0;
for (MemoryChunk* chunk : *space) {
@@ -3802,7 +3838,7 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
contains_old_to_new_sweeping_slots ||
contains_old_to_old_invalidated_slots ||
contains_old_to_new_invalidated_slots) {
- job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
+ items->emplace_back(CreateRememberedSetUpdatingItem(chunk, mode));
pages++;
}
}
@@ -3876,35 +3912,29 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
- ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
-
- int remembered_set_pages = 0;
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->old_space(), RememberedSetUpdatingMode::ALL);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->code_space(), RememberedSetUpdatingMode::ALL);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->lo_space(), RememberedSetUpdatingMode::ALL);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->code_lo_space(), RememberedSetUpdatingMode::ALL);
- const int remembered_set_tasks =
- remembered_set_pages == 0
- ? 0
- : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
- old_to_new_slots_);
- const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
- const int num_ephemeron_table_updating_tasks = 1;
- const int num_tasks =
- Max(to_space_tasks,
- remembered_set_tasks + num_ephemeron_table_updating_tasks);
- for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpdatingTask(
- isolate(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
- GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
- }
- updating_job.AddItem(new EphemeronTableUpdatingItem(heap()));
- updating_job.Run();
+ std::vector<std::unique_ptr<UpdatingItem>> updating_items;
+
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->old_space(),
+ RememberedSetUpdatingMode::ALL);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->code_space(),
+ RememberedSetUpdatingMode::ALL);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->lo_space(),
+ RememberedSetUpdatingMode::ALL);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
+ RememberedSetUpdatingMode::ALL);
+
+ CollectToSpaceUpdatingItems(&updating_items);
+ updating_items.push_back(
+ std::make_unique<EphemeronTableUpdatingItem>(heap()));
+
+ V8::GetCurrentPlatform()
+ ->PostJob(v8::TaskPriority::kUserBlocking,
+ std::make_unique<PointersUpdatingJob>(
+ isolate(), std::move(updating_items), old_to_new_slots_,
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
+ GCTracer::BackgroundScope::
+ MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
+ ->Join();
}
{
@@ -3914,27 +3944,19 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
// byte length which is potentially a HeapNumber.
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE);
- ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
-
- int array_buffer_pages = 0;
-
- int remembered_set_pages = 0;
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->map_space(), RememberedSetUpdatingMode::ALL);
- const int remembered_set_tasks =
- remembered_set_pages == 0
- ? 0
- : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
- old_to_new_slots_);
- const int num_tasks = Max(array_buffer_pages, remembered_set_tasks);
- if (num_tasks > 0) {
- for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpdatingTask(
- isolate(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
- GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
- }
- updating_job.Run();
+ std::vector<std::unique_ptr<UpdatingItem>> updating_items;
+
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
+ RememberedSetUpdatingMode::ALL);
+ if (!updating_items.empty()) {
+ V8::GetCurrentPlatform()
+ ->PostJob(v8::TaskPriority::kUserBlocking,
+ std::make_unique<PointersUpdatingJob>(
+ isolate(), std::move(updating_items), old_to_new_slots_,
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
+ GCTracer::BackgroundScope::
+ MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
+ ->Join();
}
}
@@ -4316,18 +4338,6 @@ MinorMarkCompactCollector::~MinorMarkCompactCollector() {
delete main_marking_visitor_;
}
-int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
- DCHECK_GT(pages, 0);
- if (!FLAG_minor_mc_parallel_marking) return 1;
- // Pages are not private to markers but we can still use them to estimate the
- // amount of marking that is required.
- const int kPagesPerTask = 2;
- const int wanted_tasks = Max(1, pages / kPagesPerTask);
- return Min(NumberOfAvailableCores(),
- Min(wanted_tasks,
- MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks));
-}
-
void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
for (Page* p : sweep_to_iterate_pages_) {
if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
@@ -4413,38 +4423,20 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
PointersUpdatingVisitor updating_visitor(isolate());
- ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
+ std::vector<std::unique_ptr<UpdatingItem>> updating_items;
// Create batches of global handles.
- const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
- int remembered_set_pages = 0;
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->old_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->code_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->map_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->lo_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->code_lo_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- const int remembered_set_tasks =
- remembered_set_pages == 0 ? 0
- : NumberOfParallelPointerUpdateTasks(
- remembered_set_pages, old_to_new_slots_);
- const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
- for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpdatingTask(
- isolate(), GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
- GCTracer::BackgroundScope::
- MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
- }
+ CollectToSpaceUpdatingItems(&updating_items);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->old_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->code_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->lo_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
{
TRACE_GC(heap()->tracer(),
@@ -4456,7 +4448,15 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
- updating_job.Run();
+ V8::GetCurrentPlatform()
+ ->PostJob(
+ v8::TaskPriority::kUserBlocking,
+ std::make_unique<PointersUpdatingJob>(
+ isolate(), std::move(updating_items), old_to_new_slots_,
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
+ GCTracer::BackgroundScope::
+ MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
+ ->Join();
}
{
@@ -4704,56 +4704,41 @@ void MinorMarkCompactCollector::EvacuateEpilogue() {
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
-UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
- MemoryChunk* chunk, Address start, Address end) {
- return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
+std::unique_ptr<UpdatingItem>
+MinorMarkCompactCollector::CreateToSpaceUpdatingItem(MemoryChunk* chunk,
+ Address start,
+ Address end) {
+ return std::make_unique<ToSpaceUpdatingItem<NonAtomicMarkingState>>(
chunk, start, end, non_atomic_marking_state());
}
-UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
+std::unique_ptr<UpdatingItem>
+MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
- return new RememberedSetUpdatingItem<NonAtomicMarkingState,
- MINOR_MARK_COMPACTOR>(
+ return std::make_unique<
+ RememberedSetUpdatingItem<NonAtomicMarkingState, MINOR_MARK_COMPACTOR>>(
heap(), non_atomic_marking_state(), chunk, updating_mode);
}
-class MarkingItem;
class PageMarkingItem;
class RootMarkingItem;
class YoungGenerationMarkingTask;
-class MarkingItem : public ItemParallelJob::Item {
- public:
- ~MarkingItem() override = default;
- virtual void Process(YoungGenerationMarkingTask* task) = 0;
-};
-
-class YoungGenerationMarkingTask : public ItemParallelJob::Task {
+class YoungGenerationMarkingTask {
public:
YoungGenerationMarkingTask(
Isolate* isolate, MinorMarkCompactCollector* collector,
MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
- : ItemParallelJob::Task(isolate),
- collector_(collector),
- marking_worklist_(global_worklist, task_id),
+ : marking_worklist_(global_worklist, task_id),
marking_state_(collector->marking_state()),
visitor_(marking_state_, global_worklist, task_id) {
local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
Page::kPageSize);
}
- void RunInParallel(Runner runner) override {
- if (runner == Runner::kForeground) {
- TRACE_GC(collector_->heap()->tracer(),
- GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
- ProcessItems();
- } else {
- TRACE_BACKGROUND_GC(
- collector_->heap()->tracer(),
- GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
- ProcessItems();
- }
- }
+ int slots() const { return slots_; }
+
+ void IncrementSlots() { ++slots_; }
void MarkObject(Object object) {
if (!Heap::InYoungGeneration(object)) return;
@@ -4764,34 +4749,6 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
}
}
- private:
- void ProcessItems() {
- double marking_time = 0.0;
- {
- TimedScope scope(&marking_time);
- MarkingItem* item = nullptr;
- while ((item = GetItem<MarkingItem>()) != nullptr) {
- item->Process(this);
- item->MarkFinished();
- EmptyLocalMarkingWorklist();
- }
- EmptyMarkingWorklist();
- DCHECK(marking_worklist_.IsLocalEmpty());
- FlushLiveBytes();
- }
- if (FLAG_trace_minor_mc_parallel_marking) {
- PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
- static_cast<void*>(this), marking_time);
- }
- }
- void EmptyLocalMarkingWorklist() {
- HeapObject object;
- while (marking_worklist_.Pop(&object)) {
- const int size = visitor_.Visit(object);
- IncrementLiveBytes(object, size);
- }
- }
-
void EmptyMarkingWorklist() {
HeapObject object;
while (marking_worklist_.Pop(&object)) {
@@ -4810,20 +4767,20 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
}
}
- MinorMarkCompactCollector* collector_;
+ private:
MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
MinorMarkCompactCollector::MarkingState* marking_state_;
YoungGenerationMarkingVisitor visitor_;
std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
+ int slots_ = 0;
};
-class PageMarkingItem : public MarkingItem {
+class PageMarkingItem : public ParallelWorkItem {
public:
- explicit PageMarkingItem(MemoryChunk* chunk, std::atomic<int>* global_slots)
- : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
- ~PageMarkingItem() override { *global_slots_ = *global_slots_ + slots_; }
+ explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
+ ~PageMarkingItem() = default;
- void Process(YoungGenerationMarkingTask* task) override {
+ void Process(YoungGenerationMarkingTask* task) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"PageMarkingItem::Process");
base::MutexGuard guard(chunk_->mutex());
@@ -4880,23 +4837,102 @@ class PageMarkingItem : public MarkingItem {
USE(success);
DCHECK(success);
task->MarkObject(heap_object);
- slots_++;
+ task->IncrementSlots();
return KEEP_SLOT;
}
return REMOVE_SLOT;
}
MemoryChunk* chunk_;
- std::atomic<int>* global_slots_;
- int slots_;
+};
+
+class YoungGenerationMarkingJob : public v8::JobTask {
+ public:
+ YoungGenerationMarkingJob(
+ Isolate* isolate, MinorMarkCompactCollector* collector,
+ MinorMarkCompactCollector::MarkingWorklist* global_worklist,
+ std::vector<PageMarkingItem> marking_items, std::atomic<int>* slots)
+ : isolate_(isolate),
+ collector_(collector),
+ global_worklist_(global_worklist),
+ marking_items_(std::move(marking_items)),
+ remaining_marking_items_(marking_items_.size()),
+ generator_(marking_items_.size()),
+ slots_(slots) {}
+
+ void Run(JobDelegate* delegate) override {
+ if (delegate->IsJoiningThread()) {
+ TRACE_GC(collector_->heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
+ ProcessItems(delegate);
+ } else {
+ TRACE_BACKGROUND_GC(
+ collector_->heap()->tracer(),
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
+ ProcessItems(delegate);
+ }
+ }
+
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ // Pages are not private to markers but we can still use them to estimate
+ // the amount of marking that is required.
+ const int kPagesPerTask = 2;
+ size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
+ size_t num_tasks = std::max((items + 1) / kPagesPerTask,
+ global_worklist_->GlobalPoolSize());
+ return std::min<size_t>(
+ num_tasks, MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks);
+ }
+
+ private:
+ void ProcessItems(JobDelegate* delegate) {
+ double marking_time = 0.0;
+ {
+ TimedScope scope(&marking_time);
+ YoungGenerationMarkingTask task(isolate_, collector_, global_worklist_,
+ delegate->GetTaskId());
+ ProcessMarkingItems(&task);
+ task.EmptyMarkingWorklist();
+ task.FlushLiveBytes();
+ *slots_ += task.slots();
+ }
+ if (FLAG_trace_minor_mc_parallel_marking) {
+ PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
+ static_cast<void*>(this), marking_time);
+ }
+ }
+
+ void ProcessMarkingItems(YoungGenerationMarkingTask* task) {
+ while (remaining_marking_items_.load(std::memory_order_relaxed) > 0) {
+ base::Optional<size_t> index = generator_.GetNext();
+ if (!index) return;
+ for (size_t i = *index; i < marking_items_.size(); ++i) {
+ auto& work_item = marking_items_[i];
+ if (!work_item.TryAcquire()) break;
+ work_item.Process(task);
+ task->EmptyMarkingWorklist();
+ if (remaining_marking_items_.fetch_sub(1, std::memory_order_relaxed) <=
+ 1) {
+ return;
+ }
+ }
+ }
+ }
+
+ Isolate* isolate_;
+ MinorMarkCompactCollector* collector_;
+ MinorMarkCompactCollector::MarkingWorklist* global_worklist_;
+ std::vector<PageMarkingItem> marking_items_;
+ std::atomic_size_t remaining_marking_items_{0};
+ IndexGenerator generator_;
+ std::atomic<int>* slots_;
};
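YoungGenerationMarkingJob sizes itself from two signals: the unclaimed page items and the size of the shared marking worklist, so workers stay alive while either source still has work. A small sketch of that estimate; the cap of eight stands in for MarkingWorklist::kMaxNumTasks and is an assumption, the rest mirrors the formula above.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Sketch of YoungGenerationMarkingJob::GetMaxConcurrency: keep workers alive
// while either unclaimed page items or shared worklist segments remain.
size_t WantedMarkers(size_t page_items, size_t global_pool_size) {
  const size_t kPagesPerTask = 2;
  const size_t kMaxNumTasks = 8;  // assumed cap
  size_t num_tasks =
      std::max((page_items + 1) / kPagesPerTask, global_pool_size);
  return std::min(num_tasks, kMaxNumTasks);
}

int main() {
  std::printf("%zu\n", WantedMarkers(7, 0));   // (7 + 1) / 2 = 4
  std::printf("%zu\n", WantedMarkers(0, 3));   // the worklist alone keeps 3
  std::printf("%zu\n", WantedMarkers(40, 1));  // capped at 8
}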
void MinorMarkCompactCollector::MarkRootSetInParallel(
RootMarkingVisitor* root_visitor) {
std::atomic<int> slots;
{
- ItemParallelJob job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
+ std::vector<PageMarkingItem> marking_items;
// Seed the root set (roots + old->new set).
{
@@ -4914,22 +4950,25 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
root_visitor);
// Create items for each page.
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- heap(), [&job, &slots](MemoryChunk* chunk) {
- job.AddItem(new PageMarkingItem(chunk, &slots));
+ heap(), [&marking_items](MemoryChunk* chunk) {
+ marking_items.emplace_back(chunk);
});
}
// Add tasks and run in parallel.
{
+ // The main thread might hold local items, while GlobalPoolSize() == 0.
+ // Flush to ensure these items are visible globally and picked up by the
+ // job.
+ worklist()->FlushToGlobal(kMainThreadTask);
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
- const int new_space_pages =
- static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
- const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
- for (int i = 0; i < num_tasks; i++) {
- job.AddTask(
- new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
- }
- job.Run();
+ V8::GetCurrentPlatform()
+ ->PostJob(v8::TaskPriority::kUserBlocking,
+ std::make_unique<YoungGenerationMarkingJob>(
+ isolate(), this, worklist(), std::move(marking_items),
+ &slots))
+ ->Join();
+
DCHECK(worklist()->IsEmpty());
}
}
@@ -5161,8 +5200,7 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
} // namespace
void MinorMarkCompactCollector::EvacuatePagesInParallel() {
- ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items;
intptr_t live_bytes = 0;
for (Page* page : new_space_evacuation_pages_) {
@@ -5176,7 +5214,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
- evacuation_job.AddItem(new EvacuationItem(page));
+ evacuation_items.emplace_back(ParallelWorkItem{}, page);
}
// Promote young generation large objects.
@@ -5189,15 +5227,15 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
if (non_atomic_marking_state_.IsGrey(object)) {
heap_->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
- evacuation_job.AddItem(new EvacuationItem(current));
+ evacuation_items.emplace_back(ParallelWorkItem{}, current);
}
}
- if (evacuation_job.NumberOfItems() == 0) return;
+ if (evacuation_items.empty()) return;
YoungGenerationMigrationObserver observer(heap(),
heap()->mark_compact_collector());
CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
- this, &evacuation_job, &observer, live_bytes);
+ this, std::move(evacuation_items), &observer, live_bytes);
}
#endif // ENABLE_MINOR_MC
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index d369ac0183..4d598f71ff 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -13,6 +13,7 @@
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"
#include "src/heap/memory-measurement.h"
+#include "src/heap/parallel-work-item.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"
@@ -31,7 +32,8 @@ class YoungGenerationMarkingVisitor;
class MarkBitCellIterator {
public:
- MarkBitCellIterator(MemoryChunk* chunk, Bitmap* bitmap) : chunk_(chunk) {
+ MarkBitCellIterator(const MemoryChunk* chunk, Bitmap* bitmap)
+ : chunk_(chunk) {
last_cell_index_ =
Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
cell_base_ = chunk_->address();
@@ -82,7 +84,7 @@ class MarkBitCellIterator {
}
private:
- MemoryChunk* chunk_;
+ const MemoryChunk* chunk_;
MarkBit::CellType* cells_;
unsigned int last_cell_index_;
unsigned int cell_index_;
@@ -101,7 +103,7 @@ class LiveObjectRange {
using reference = const value_type&;
using iterator_category = std::forward_iterator_tag;
- inline iterator(MemoryChunk* chunk, Bitmap* bitmap, Address start);
+ inline iterator(const MemoryChunk* chunk, Bitmap* bitmap, Address start);
inline iterator& operator++();
inline iterator operator++(int);
@@ -119,7 +121,7 @@ class LiveObjectRange {
private:
inline void AdvanceToNextValidObject();
- MemoryChunk* const chunk_;
+ const MemoryChunk* const chunk_;
Map const one_word_filler_map_;
Map const two_word_filler_map_;
Map const free_space_map_;
@@ -130,7 +132,7 @@ class LiveObjectRange {
int current_size_;
};
- LiveObjectRange(MemoryChunk* chunk, Bitmap* bitmap)
+ LiveObjectRange(const MemoryChunk* chunk, Bitmap* bitmap)
: chunk_(chunk),
bitmap_(bitmap),
start_(chunk_->area_start()),
@@ -142,7 +144,7 @@ class LiveObjectRange {
inline iterator end();
private:
- MemoryChunk* const chunk_;
+ const MemoryChunk* const chunk_;
Bitmap* bitmap_;
Address start_;
Address end_;
@@ -213,30 +215,28 @@ class MarkCompactCollectorBase {
virtual void Evacuate() = 0;
virtual void EvacuatePagesInParallel() = 0;
virtual void UpdatePointersAfterEvacuation() = 0;
- virtual UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk,
- Address start,
- Address end) = 0;
- virtual UpdatingItem* CreateRememberedSetUpdatingItem(
+ virtual std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(
+ MemoryChunk* chunk, Address start, Address end) = 0;
+ virtual std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;
template <class Evacuator, class Collector>
- void CreateAndExecuteEvacuationTasks(Collector* collector,
- ItemParallelJob* job,
- MigrationObserver* migration_observer,
- const intptr_t live_bytes);
+ void CreateAndExecuteEvacuationTasks(
+ Collector* collector,
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
+ MigrationObserver* migration_observer, const intptr_t live_bytes);
// Returns whether this page should be moved according to heuristics.
bool ShouldMovePage(Page* p, intptr_t live_bytes, bool promote_young);
- int CollectToSpaceUpdatingItems(ItemParallelJob* job);
+ int CollectToSpaceUpdatingItems(
+ std::vector<std::unique_ptr<UpdatingItem>>* items);
template <typename IterateableSpace>
- int CollectRememberedSetUpdatingItems(ItemParallelJob* job,
- IterateableSpace* space,
- RememberedSetUpdatingMode mode);
+ int CollectRememberedSetUpdatingItems(
+ std::vector<std::unique_ptr<UpdatingItem>>* items,
+ IterateableSpace* space, RememberedSetUpdatingMode mode);
- int NumberOfParallelCompactionTasks(int pages);
- int NumberOfParallelPointerUpdateTasks(int pages, int slots);
- int NumberOfParallelToSpacePointerUpdateTasks(int pages);
+ int NumberOfParallelCompactionTasks();
Heap* heap_;
// Number of old to new slots. Should be computed during MarkLiveObjects.
@@ -434,11 +434,11 @@ class MainMarkingVisitor final
// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
public:
-#ifdef V8_CONCURRENT_MARKING
+#ifdef V8_ATOMIC_MARKING_STATE
using MarkingState = MajorMarkingState;
#else
using MarkingState = MajorNonAtomicMarkingState;
-#endif // V8_CONCURRENT_MARKING
+#endif // V8_ATOMIC_MARKING_STATE
using AtomicMarkingState = MajorAtomicMarkingState;
using NonAtomicMarkingState = MajorNonAtomicMarkingState;
@@ -478,7 +478,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
-  // Stop concurrent marking (either by preempting it right away or waiting for
-  // it to complete as requested by |stop_request|).
+  // Stop concurrent marking (either by preempting it right away or waiting
+  // for it to complete).
- void FinishConcurrentMarking(ConcurrentMarking::StopRequest stop_request);
+ void FinishConcurrentMarking();
bool StartCompaction();
@@ -710,9 +710,10 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
- UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
- Address end) override;
- UpdatingItem* CreateRememberedSetUpdatingItem(
+ std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
+ Address start,
+ Address end) override;
+ std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
void ReleaseEvacuationCandidates();
@@ -851,13 +852,12 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
- UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
- Address end) override;
- UpdatingItem* CreateRememberedSetUpdatingItem(
+ std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
+ Address start,
+ Address end) override;
+ std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
- int NumberOfParallelMarkingTasks(int pages);
-
void SweepArrayBufferExtensions();
MarkingWorklist* worklist_;
@@ -871,6 +871,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
NonAtomicMarkingState non_atomic_marking_state_;
friend class YoungGenerationMarkingTask;
+ friend class YoungGenerationMarkingJob;
friend class YoungGenerationMarkingVisitor;
};
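With ItemParallelJob gone, the updating-item factories in this header now return std::unique_ptr<UpdatingItem> and callers keep them in a std::vector, so ownership is released automatically after the job is joined instead of through manual delete loops. A minimal sketch of that factory shape, using hypothetical Item and Collector types rather than the V8 ones:

#include <memory>
#include <vector>

// Hypothetical Item/Collector types illustrating the new factory shape:
// virtual creators hand back std::unique_ptr, the caller owns a vector of
// items, and nothing needs an explicit delete once processing is done.
struct Item {
  virtual ~Item() = default;
  virtual void Process() = 0;
};

struct ConcreteItem : Item {
  void Process() override {}
};

struct Collector {
  virtual ~Collector() = default;
  virtual std::unique_ptr<Item> CreateItem() {
    return std::make_unique<ConcreteItem>();
  }
};

int main() {
  Collector collector;
  std::vector<std::unique_ptr<Item>> items;
  items.emplace_back(collector.CreateItem());
  for (auto& item : items) item->Process();
  // items and everything they own are released here, no delete loop needed.
}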
diff --git a/deps/v8/src/heap/marking-visitor-inl.h b/deps/v8/src/heap/marking-visitor-inl.h
index 532e1c9fd5..bdc955b4bb 100644
--- a/deps/v8/src/heap/marking-visitor-inl.h
+++ b/deps/v8/src/heap/marking-visitor-inl.h
@@ -9,6 +9,8 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
+#include "src/objects/objects.h"
+#include "src/snapshot/deserializer.h"
namespace v8 {
namespace internal {
@@ -349,8 +351,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitWeakCell(
// ===========================================================================
template <typename ConcreteVisitor, typename MarkingState>
-size_t
-MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkDescriptorArrayBlack(
+int MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkDescriptorArrayBlack(
DescriptorArray descriptors) {
concrete_visitor()->marking_state()->WhiteToGrey(descriptors);
if (concrete_visitor()->marking_state()->GreyToBlack(descriptors)) {
@@ -389,36 +390,64 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptorArray(
}
template <typename ConcreteVisitor, typename MarkingState>
+int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptorsForMap(
+ Map map) {
+ if (!map.CanTransition()) return 0;
+
+ // Maps that can transition share their descriptor arrays and require
+ // special visiting logic to avoid memory leaks.
+ // Since descriptor arrays are potentially shared, ensure that only the
+ // descriptors that belong to this map are marked. The first time a
+ // non-empty descriptor array is marked, its header is also visited. The
+ // slot holding the descriptor array will be implicitly recorded when the
+ // pointer fields of this map are visited.
+
+ Object maybe_descriptors =
+ TaggedField<Object, Map::kInstanceDescriptorsOffset>::Acquire_Load(
+ heap_->isolate(), map);
+
+ // If the descriptors are a Smi, then this Map is in the process of being
+ // deserialized, and doesn't yet have an initialized descriptor field.
+ if (maybe_descriptors.IsSmi()) {
+ DCHECK_EQ(maybe_descriptors, Deserializer::uninitialized_field_value());
+ return 0;
+ }
+
+ DescriptorArray descriptors = DescriptorArray::cast(maybe_descriptors);
+
+ // Don't do any special processing of strong descriptor arrays, let them get
+ // marked through the normal visitor mechanism.
+ if (descriptors.IsStrongDescriptorArray()) {
+ return 0;
+ }
+
+ int size = MarkDescriptorArrayBlack(descriptors);
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
+ if (number_of_own_descriptors) {
+ // It is possible that the concurrent marker observes the
+ // number_of_own_descriptors out of sync with the descriptors. In that
+ // case the marking write barrier for the descriptor array will ensure
+ // that all required descriptors are marked. The concurrent marker
+ // just should avoid crashing in that case. That's why we need the
+ // std::min<int>() below.
+ VisitDescriptors(descriptors,
+ std::min<int>(number_of_own_descriptors,
+ descriptors.number_of_descriptors()));
+ }
+
+ return size;
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitMap(Map meta_map,
Map map) {
if (!concrete_visitor()->ShouldVisit(map)) return 0;
int size = Map::BodyDescriptor::SizeOf(meta_map, map);
- if (map.CanTransition()) {
- // Maps that can transition share their descriptor arrays and require
- // special visiting logic to avoid memory leaks.
- // Since descriptor arrays are potentially shared, ensure that only the
- // descriptors that belong to this map are marked. The first time a
- // non-empty descriptor array is marked, its header is also visited. The
- // slot holding the descriptor array will be implicitly recorded when the
- // pointer fields of this map are visited.
- DescriptorArray descriptors = map.synchronized_instance_descriptors();
- size += MarkDescriptorArrayBlack(descriptors);
- int number_of_own_descriptors = map.NumberOfOwnDescriptors();
- if (number_of_own_descriptors) {
- // It is possible that the concurrent marker observes the
- // number_of_own_descriptors out of sync with the descriptors. In that
- // case the marking write barrier for the descriptor array will ensure
- // that all required descriptors are marked. The concurrent marker
- // just should avoid crashing in that case. That's why we need the
- // std::min<int>() below.
- VisitDescriptors(descriptors,
- std::min<int>(number_of_own_descriptors,
- descriptors.number_of_descriptors()));
- }
- // Mark the pointer fields of the Map. Since the transitions array has
- // been marked already, it is fine that one of these fields contains a
- // pointer to it.
- }
+ size += VisitDescriptorsForMap(map);
+
+ // Mark the pointer fields of the Map. If there is a transitions array, it has
+ // been marked already, so it is fine that one of these fields contains a
+ // pointer to it.
Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
return size;
}
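VisitDescriptorsForMap reads the descriptor field with an acquire load and bails out while it still holds the deserializer's Smi placeholder, because a concurrent marker may observe a map whose descriptor array has not been published yet. The standalone sketch below shows the underlying release/acquire publication pattern with a sentinel; the Descriptors struct and the sentinel value are illustrative stand-ins for the DescriptorArray and Deserializer::uninitialized_field_value().

#include <atomic>
#include <cassert>
#include <cstdint>
#include <thread>

// A writer fully initializes the payload and release-stores the slot; a
// reader acquire-loads and must tolerate the "not yet initialized" sentinel.
struct Descriptors {
  int count = 0;
};

constexpr std::uintptr_t kUninitializedSentinel = 0x2;  // assumed sentinel

int main() {
  Descriptors descriptors;
  std::atomic<std::uintptr_t> slot{kUninitializedSentinel};

  std::thread deserializer([&] {
    descriptors.count = 3;  // fully construct the payload first...
    slot.store(reinterpret_cast<std::uintptr_t>(&descriptors),
               std::memory_order_release);  // ...then publish it.
  });

  // Marker side: either we see the sentinel and skip the field, or we see a
  // pointer to a fully initialized object.
  std::uintptr_t value = slot.load(std::memory_order_acquire);
  if (value != kUninitializedSentinel) {
    assert(reinterpret_cast<Descriptors*>(value)->count == 3);
  }

  deserializer.join();
}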
diff --git a/deps/v8/src/heap/marking-visitor.h b/deps/v8/src/heap/marking-visitor.h
index 3707fc6031..45dda338d0 100644
--- a/deps/v8/src/heap/marking-visitor.h
+++ b/deps/v8/src/heap/marking-visitor.h
@@ -11,58 +11,12 @@
#include "src/heap/memory-chunk.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
+#include "src/heap/weak-object-worklists.h"
#include "src/heap/worklist.h"
-#include "src/objects/heap-object.h" // For Worklist<HeapObject, ...>
-#include "src/objects/js-weak-refs.h" // For Worklist<WeakCell, ...>
namespace v8 {
namespace internal {
-struct Ephemeron {
- HeapObject key;
- HeapObject value;
-};
-
-using EphemeronWorklist = Worklist<Ephemeron, 64>;
-
-// Weak objects encountered during marking.
-struct WeakObjects {
- Worklist<TransitionArray, 64> transition_arrays;
-
- // Keep track of all EphemeronHashTables in the heap to process
- // them in the atomic pause.
- Worklist<EphemeronHashTable, 64> ephemeron_hash_tables;
-
- // Keep track of all ephemerons for concurrent marking tasks. Only store
- // ephemerons in these Worklists if both key and value are unreachable at the
- // moment.
- //
- // MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains and fills these
- // worklists.
- //
- // current_ephemerons is used as draining worklist in the current fixpoint
- // iteration.
- EphemeronWorklist current_ephemerons;
-
- // Stores ephemerons to visit in the next fixpoint iteration.
- EphemeronWorklist next_ephemerons;
-
- // When draining the marking worklist new discovered ephemerons are pushed
- // into this worklist.
- EphemeronWorklist discovered_ephemerons;
-
- // TODO(marja): For old space, we only need the slot, not the host
- // object. Optimize this by adding a different storage for old space.
- Worklist<std::pair<HeapObject, HeapObjectSlot>, 64> weak_references;
- Worklist<std::pair<HeapObject, Code>, 64> weak_objects_in_code;
-
- Worklist<JSWeakRef, 64> js_weak_refs;
- Worklist<WeakCell, 64> weak_cells;
-
- Worklist<SharedFunctionInfo, 64> bytecode_flushing_candidates;
- Worklist<JSFunction, 64> flushed_js_functions;
-};
-
struct EphemeronMarking {
std::vector<HeapObject> newly_discovered;
bool newly_discovered_overflowed;
@@ -220,6 +174,9 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
V8_INLINE void VisitDescriptors(DescriptorArray descriptors,
int number_of_own_descriptors);
+
+ V8_INLINE int VisitDescriptorsForMap(Map map);
+
template <typename T>
int VisitEmbedderTracingSubclass(Map map, T object);
V8_INLINE int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
@@ -227,7 +184,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
// Marks the descriptor array black without pushing it on the marking work
// list and visits its header. Returns the size of the descriptor array
// if it was successully marked as black.
- V8_INLINE size_t MarkDescriptorArrayBlack(DescriptorArray descriptors);
+ V8_INLINE int MarkDescriptorArrayBlack(DescriptorArray descriptors);
// Marks the object grey and pushes it on the marking work list.
V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
diff --git a/deps/v8/src/heap/memory-allocator.cc b/deps/v8/src/heap/memory-allocator.cc
index 2c9daa3ec4..a3d4f0029e 100644
--- a/deps/v8/src/heap/memory-allocator.cc
+++ b/deps/v8/src/heap/memory-allocator.cc
@@ -154,68 +154,55 @@ void MemoryAllocator::TearDown() {
data_page_allocator_ = nullptr;
}
-class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
+class MemoryAllocator::Unmapper::UnmapFreeMemoryJob : public JobTask {
public:
- explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
- : CancelableTask(isolate),
- unmapper_(unmapper),
- tracer_(isolate->heap()->tracer()) {}
+ explicit UnmapFreeMemoryJob(Isolate* isolate, Unmapper* unmapper)
+ : unmapper_(unmapper), tracer_(isolate->heap()->tracer()) {}
- private:
- void RunInternal() override {
+ void Run(JobDelegate* delegate) override {
TRACE_BACKGROUND_GC(tracer_,
GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
- unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
- unmapper_->active_unmapping_tasks_--;
- unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+ unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>(
+ delegate);
if (FLAG_trace_unmapper) {
- PrintIsolate(unmapper_->heap_->isolate(),
- "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
+ PrintIsolate(unmapper_->heap_->isolate(), "UnmapFreeMemoryTask Done\n");
}
}
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ const size_t kTaskPerChunk = 8;
+ return std::min<size_t>(
+ kMaxUnmapperTasks,
+ worker_count +
+ (unmapper_->NumberOfCommittedChunks() + kTaskPerChunk - 1) /
+ kTaskPerChunk);
+ }
+
+ private:
Unmapper* const unmapper_;
GCTracer* const tracer_;
- DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+ DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryJob);
};
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
- if (!MakeRoomForNewTasks()) {
- // kMaxUnmapperTasks are already running. Avoid creating any more.
+ if (job_handle_ && job_handle_->IsValid()) {
+ job_handle_->NotifyConcurrencyIncrease();
+ } else {
+ job_handle_ = V8::GetCurrentPlatform()->PostJob(
+ TaskPriority::kUserVisible,
+ std::make_unique<UnmapFreeMemoryJob>(heap_->isolate(), this));
if (FLAG_trace_unmapper) {
- PrintIsolate(heap_->isolate(),
- "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
- kMaxUnmapperTasks);
+ PrintIsolate(heap_->isolate(), "Unmapper::FreeQueuedChunks: new Job\n");
}
- return;
- }
- auto task = std::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
- if (FLAG_trace_unmapper) {
- PrintIsolate(heap_->isolate(),
- "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
- task->id());
}
- DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
- DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
- DCHECK_GE(active_unmapping_tasks_, 0);
- active_unmapping_tasks_++;
- task_ids_[pending_unmapping_tasks_++] = task->id();
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
} else {
PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
}
}
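FreeQueuedChunks now keeps a single long-lived job: if a handle is still valid it only signals that more work arrived, otherwise it posts a fresh UnmapFreeMemoryJob. A toy sketch of that reuse-or-post control flow; JobHandle here is a stand-in type, not the v8::JobHandle interface.

#include <cstdio>
#include <memory>

// Toy JobHandle standing in for v8::JobHandle; only the reuse-or-post control
// flow is the point here.
class JobHandle {
 public:
  bool IsValid() const { return valid_; }
  void NotifyConcurrencyIncrease() { std::puts("existing job notified"); }
  void Join() { valid_ = false; }

 private:
  bool valid_ = true;
};

std::unique_ptr<JobHandle> PostJob() {
  std::puts("new job posted");
  return std::make_unique<JobHandle>();
}

void FreeQueuedChunks(std::unique_ptr<JobHandle>& job_handle) {
  if (job_handle && job_handle->IsValid()) {
    // More chunks were queued while a job is still running: bump concurrency.
    job_handle->NotifyConcurrencyIncrease();
  } else {
    job_handle = PostJob();
  }
}

int main() {
  std::unique_ptr<JobHandle> handle;
  FreeQueuedChunks(handle);  // posts a new job
  FreeQueuedChunks(handle);  // reuses the live handle
  handle->Join();
}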
void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
- for (int i = 0; i < pending_unmapping_tasks_; i++) {
- if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
- TryAbortResult::kTaskAborted) {
- pending_unmapping_tasks_semaphore_.Wait();
- }
- }
- pending_unmapping_tasks_ = 0;
- active_unmapping_tasks_ = 0;
+ if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
if (FLAG_trace_unmapper) {
PrintIsolate(
@@ -234,26 +221,18 @@ void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
}
-bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
- DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
-
- if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
- // All previous unmapping tasks have been run to completion.
- // Finalize those tasks to make room for new ones.
- CancelAndWaitForPendingTasks();
- }
- return pending_unmapping_tasks_ != kMaxUnmapperTasks;
-}
-
-void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks(
+ JobDelegate* delegate) {
MemoryChunk* chunk = nullptr;
while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
allocator_->PerformFreeMemory(chunk);
+ if (delegate && delegate->ShouldYield()) return;
}
}
template <MemoryAllocator::Unmapper::FreeMode mode>
-void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
+ JobDelegate* delegate) {
MemoryChunk* chunk = nullptr;
if (FLAG_trace_unmapper) {
PrintIsolate(
@@ -266,6 +245,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
allocator_->PerformFreeMemory(chunk);
if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+ if (delegate && delegate->ShouldYield()) return;
}
if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
// The previous loop uncommitted any pages marked as pooled and added them
@@ -273,13 +253,14 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
// though.
while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
+ if (delegate && delegate->ShouldYield()) return;
}
}
PerformFreeMemoryOnQueuedNonRegularChunks();
}
void MemoryAllocator::Unmapper::TearDown() {
- CHECK_EQ(0, pending_unmapping_tasks_);
+ CHECK(!job_handle_ || !job_handle_->IsValid());
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
for (int i = 0; i < kNumberOfChunkQueues; i++) {
DCHECK(chunks_[i].empty());
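
The hunks above replace the Unmapper's hand-rolled task bookkeeping (task id array, semaphore, pending/active counters) with a single v8::JobHandle. As a rough, stand-alone sketch of that lazily-post-or-notify pattern against the public v8-platform.h job API as used in these hunks; QueueDrainJob, ScheduleDrain and the pending counter are invented for the example and are not V8 heap code.

#include <atomic>
#include <cstddef>
#include <memory>
#include "v8-platform.h"  // public platform/job API; include path depends on the embedder

class QueueDrainJob final : public v8::JobTask {
 public:
  explicit QueueDrainJob(std::atomic<size_t>* pending) : pending_(pending) {}

  void Run(v8::JobDelegate* delegate) override {
    // Drain queued work, yielding whenever the scheduler wants the thread back.
    while (pending_->load(std::memory_order_relaxed) > 0) {
      if (delegate->ShouldYield()) return;
      pending_->fetch_sub(1, std::memory_order_relaxed);
    }
  }

  size_t GetMaxConcurrency(size_t /*worker_count*/) const override {
    // At most one worker per queued item; the scheduler clamps this further.
    return pending_->load(std::memory_order_relaxed);
  }

 private:
  std::atomic<size_t>* const pending_;
};

void ScheduleDrain(v8::Platform* platform,
                   std::unique_ptr<v8::JobHandle>* handle,
                   std::atomic<size_t>* pending) {
  if (*handle && (*handle)->IsValid()) {
    // A job already exists; just signal that there is more work.
    (*handle)->NotifyConcurrencyIncrease();
  } else {
    *handle = platform->PostJob(v8::TaskPriority::kUserVisible,
                                std::make_unique<QueueDrainJob>(pending));
  }
}
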
diff --git a/deps/v8/src/heap/memory-allocator.h b/deps/v8/src/heap/memory-allocator.h
index 7f95c49629..179877e753 100644
--- a/deps/v8/src/heap/memory-allocator.h
+++ b/deps/v8/src/heap/memory-allocator.h
@@ -61,14 +61,10 @@ class MemoryAllocator {
// chunks.
class Unmapper {
public:
- class UnmapFreeMemoryTask;
+ class UnmapFreeMemoryJob;
Unmapper(Heap* heap, MemoryAllocator* allocator)
- : heap_(heap),
- allocator_(allocator),
- pending_unmapping_tasks_semaphore_(0),
- pending_unmapping_tasks_(0),
- active_unmapping_tasks_(0) {
+ : heap_(heap), allocator_(allocator) {
chunks_[kRegular].reserve(kReservedQueueingSlots);
chunks_[kPooled].reserve(kReservedQueueingSlots);
}
@@ -142,18 +138,16 @@ class MemoryAllocator {
bool MakeRoomForNewTasks();
template <FreeMode mode>
- void PerformFreeMemoryOnQueuedChunks();
+ void PerformFreeMemoryOnQueuedChunks(JobDelegate* delegate = nullptr);
- void PerformFreeMemoryOnQueuedNonRegularChunks();
+ void PerformFreeMemoryOnQueuedNonRegularChunks(
+ JobDelegate* delegate = nullptr);
Heap* const heap_;
MemoryAllocator* const allocator_;
base::Mutex mutex_;
std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
- CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
- base::Semaphore pending_unmapping_tasks_semaphore_;
- intptr_t pending_unmapping_tasks_;
- std::atomic<intptr_t> active_unmapping_tasks_;
+ std::unique_ptr<v8::JobHandle> job_handle_;
friend class MemoryAllocator;
};
diff --git a/deps/v8/src/heap/memory-chunk-layout.cc b/deps/v8/src/heap/memory-chunk-layout.cc
index d4e1d1267e..e89a01fb0a 100644
--- a/deps/v8/src/heap/memory-chunk-layout.cc
+++ b/deps/v8/src/heap/memory-chunk-layout.cc
@@ -37,7 +37,6 @@ intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
- DCHECK_LE(kMaxRegularHeapObjectSize, memory);
return memory;
}
@@ -67,5 +66,11 @@ size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
return AllocatableMemoryInDataPage();
}
+int MemoryChunkLayout::MaxRegularCodeObjectSize() {
+ int size = static_cast<int>(AllocatableMemoryInCodePage() / 2);
+ DCHECK_LE(size, kMaxRegularHeapObjectSize);
+ return size;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/memory-chunk-layout.h b/deps/v8/src/heap/memory-chunk-layout.h
index 0a95c70989..41512cbbce 100644
--- a/deps/v8/src/heap/memory-chunk-layout.h
+++ b/deps/v8/src/heap/memory-chunk-layout.h
@@ -83,6 +83,8 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
static size_t AllocatableMemoryInDataPage();
static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
+
+ static int MaxRegularCodeObjectSize();
};
} // namespace internal
diff --git a/deps/v8/src/heap/memory-chunk.h b/deps/v8/src/heap/memory-chunk.h
index ba6c06d026..66196c1f13 100644
--- a/deps/v8/src/heap/memory-chunk.h
+++ b/deps/v8/src/heap/memory-chunk.h
@@ -59,7 +59,6 @@ class MemoryChunk : public BasicMemoryChunk {
// Only works if the object is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromHeapObject(HeapObject o) {
- DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return cast(BasicMemoryChunk::FromHeapObject(o));
}
diff --git a/deps/v8/src/heap/memory-measurement-inl.h b/deps/v8/src/heap/memory-measurement-inl.h
index 905623e744..f6c75b6ca6 100644
--- a/deps/v8/src/heap/memory-measurement-inl.h
+++ b/deps/v8/src/heap/memory-measurement-inl.h
@@ -20,13 +20,13 @@ bool NativeContextInferrer::Infer(Isolate* isolate, Map map, HeapObject object,
Address* native_context) {
switch (map.visitor_id()) {
case kVisitContext:
- *native_context = Context::cast(object).native_context().ptr();
- return true;
+ return InferForContext(isolate, Context::cast(object), native_context);
case kVisitNativeContext:
*native_context = object.ptr();
return true;
case kVisitJSFunction:
- return InferForJSFunction(JSFunction::cast(object), native_context);
+ return InferForJSFunction(isolate, JSFunction::cast(object),
+ native_context);
case kVisitJSApiObject:
case kVisitJSArrayBuffer:
case kVisitJSObject:
diff --git a/deps/v8/src/heap/memory-measurement.cc b/deps/v8/src/heap/memory-measurement.cc
index 4b8f13e6bb..5f79439b05 100644
--- a/deps/v8/src/heap/memory-measurement.cc
+++ b/deps/v8/src/heap/memory-measurement.cc
@@ -337,15 +337,37 @@ std::unique_ptr<v8::MeasureMemoryDelegate> MemoryMeasurement::DefaultDelegate(
mode);
}
-bool NativeContextInferrer::InferForJSFunction(JSFunction function,
- Address* native_context) {
- if (function.has_context()) {
- *native_context = function.context().native_context().ptr();
+bool NativeContextInferrer::InferForContext(Isolate* isolate, Context context,
+ Address* native_context) {
+ Map context_map = context.synchronized_map();
+ Object maybe_native_context =
+ TaggedField<Object, Map::kConstructorOrBackPointerOrNativeContextOffset>::
+ Acquire_Load(isolate, context_map);
+ if (maybe_native_context.IsNativeContext()) {
+ *native_context = maybe_native_context.ptr();
return true;
}
return false;
}
+bool NativeContextInferrer::InferForJSFunction(Isolate* isolate,
+ JSFunction function,
+ Address* native_context) {
+ Object maybe_context =
+ TaggedField<Object, JSFunction::kContextOffset>::Acquire_Load(isolate,
+ function);
+ // The context may be a smi during deserialization.
+ if (maybe_context.IsSmi()) {
+ DCHECK_EQ(maybe_context, Deserializer::uninitialized_field_value());
+ return false;
+ }
+ if (!maybe_context.IsContext()) {
+ // The function does not have a context.
+ return false;
+ }
+ return InferForContext(isolate, Context::cast(maybe_context), native_context);
+}
+
bool NativeContextInferrer::InferForJSObject(Isolate* isolate, Map map,
JSObject object,
Address* native_context) {
@@ -361,7 +383,7 @@ bool NativeContextInferrer::InferForJSObject(Isolate* isolate, Map map,
const int kMaxSteps = 3;
Object maybe_constructor = map.TryGetConstructor(isolate, kMaxSteps);
if (maybe_constructor.IsJSFunction()) {
- return InferForJSFunction(JSFunction::cast(maybe_constructor),
+ return InferForJSFunction(isolate, JSFunction::cast(maybe_constructor),
native_context);
}
return false;
diff --git a/deps/v8/src/heap/memory-measurement.h b/deps/v8/src/heap/memory-measurement.h
index e71bdc1cfe..cf72c57abd 100644
--- a/deps/v8/src/heap/memory-measurement.h
+++ b/deps/v8/src/heap/memory-measurement.h
@@ -73,7 +73,10 @@ class V8_EXPORT_PRIVATE NativeContextInferrer {
Address* native_context);
private:
- bool InferForJSFunction(JSFunction function, Address* native_context);
+ bool InferForContext(Isolate* isolate, Context context,
+ Address* native_context);
+ bool InferForJSFunction(Isolate* isolate, JSFunction function,
+ Address* native_context);
bool InferForJSObject(Isolate* isolate, Map map, JSObject object,
Address* native_context);
};
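
For background threads the interesting part of the new InferForJSFunction is that the context slot is read with an acquire load and a not-yet-initialized value simply makes the inference fail instead of crashing. A plain-C++ sketch of that defensive read, with FakeFunction and the zero sentinel invented for the example (the real code checks for a smi left by the deserializer):

#include <atomic>
#include <cstdint>

struct FakeFunction {
  // 0 stands in for the placeholder value the deserializer leaves behind.
  std::atomic<uintptr_t> context_field{0};
};

bool TryInferContext(const FakeFunction& fn, uintptr_t* out) {
  uintptr_t raw = fn.context_field.load(std::memory_order_acquire);
  if (raw == 0) return false;  // field not initialized yet: give up quietly
  *out = raw;
  return true;
}
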
diff --git a/deps/v8/src/heap/new-spaces.cc b/deps/v8/src/heap/new-spaces.cc
index 98a8c715f4..f1f31d6713 100644
--- a/deps/v8/src/heap/new-spaces.cc
+++ b/deps/v8/src/heap/new-spaces.cc
@@ -418,7 +418,7 @@ void NewSpace::TearDown() {
void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
void NewSpace::Grow() {
- DCHECK(heap()->safepoint()->IsActive());
+ DCHECK_IMPLIES(FLAG_local_heaps, heap()->safepoint()->IsActive());
// Double the semispace size but only up to maximum capacity.
DCHECK(TotalCapacity() < MaximumCapacity());
size_t new_capacity =
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index b84ae26c90..7ce2c07462 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -16,7 +16,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
#include "src/logging/counters.h"
-#include "src/objects/compilation-cache-inl.h"
+#include "src/objects/compilation-cache-table-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
@@ -150,7 +150,7 @@ FieldStatsCollector::GetInobjectFieldStats(Map map) {
JSObjectFieldStats stats;
stats.embedded_fields_count_ = JSObject::GetEmbedderFieldCount(map);
if (!map.is_dictionary_map()) {
- DescriptorArray descriptors = map.instance_descriptors();
+ DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
for (InternalIndex descriptor : map.IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(descriptor);
if (details.location() == kField) {
@@ -565,9 +565,10 @@ void ObjectStatsCollectorImpl::RecordVirtualFunctionTemplateInfoDetails(
FunctionTemplateInfo fti) {
// named_property_handler and indexed_property_handler are recorded as
// INTERCEPTOR_INFO_TYPE.
- if (!fti.call_code().IsUndefined(isolate())) {
+ HeapObject call_code = fti.call_code(kAcquireLoad);
+ if (!call_code.IsUndefined(isolate())) {
RecordSimpleVirtualObjectStats(
- fti, CallHandlerInfo::cast(fti.call_code()),
+ fti, CallHandlerInfo::cast(call_code),
ObjectStats::FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE);
}
if (!fti.GetInstanceCallHandler().IsUndefined(isolate())) {
@@ -883,7 +884,7 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map map) {
// This will be logged as MAP_TYPE in Phase2.
}
- DescriptorArray array = map.instance_descriptors();
+ DescriptorArray array = map.instance_descriptors(kRelaxedLoad);
if (map.owns_descriptors() &&
array != ReadOnlyRoots(heap_).empty_descriptor_array()) {
// Generally DescriptorArrays have their own instance type already
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index bc532cfa79..395f76bf60 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -5,10 +5,10 @@
#ifndef V8_HEAP_OBJECTS_VISITING_INL_H_
#define V8_HEAP_OBJECTS_VISITING_INL_H_
-#include "src/heap/objects-visiting.h"
-
#include "src/heap/embedder-tracing.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/objects-visiting.h"
+#include "src/objects/arguments.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/module-inl.h"
@@ -16,6 +16,8 @@
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table.h"
+#include "src/objects/synthetic-module-inl.h"
+#include "src/objects/torque-defined-classes.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 218a7a03c9..64a05f48bf 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -87,16 +87,16 @@ static void ClearWeakList(Heap* heap, Object list) {
template <>
struct WeakListVisitor<Code> {
static void SetWeakNext(Code code, Object next) {
- code.code_data_container().set_next_code_link(next,
- UPDATE_WEAK_WRITE_BARRIER);
+ code.code_data_container(kAcquireLoad)
+ .set_next_code_link(next, UPDATE_WEAK_WRITE_BARRIER);
}
static Object WeakNext(Code code) {
- return code.code_data_container().next_code_link();
+ return code.code_data_container(kAcquireLoad).next_code_link();
}
static HeapObject WeakNextHolder(Code code) {
- return code.code_data_container();
+ return code.code_data_container(kAcquireLoad);
}
static int WeakNextOffset() { return CodeDataContainer::kNextCodeLinkOffset; }
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 7ecb66bcee..310ea893cc 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -25,7 +25,6 @@ namespace internal {
V(Context) \
V(CoverageInfo) \
V(DataHandler) \
- V(DescriptorArray) \
V(EmbedderDataArray) \
V(EphemeronHashTable) \
V(FeedbackCell) \
@@ -41,7 +40,6 @@ namespace internal {
V(JSWeakRef) \
V(Map) \
V(NativeContext) \
- V(Oddball) \
V(PreparseData) \
V(PropertyArray) \
V(PropertyCell) \
@@ -57,7 +55,6 @@ namespace internal {
V(UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData) \
V(WasmArray) \
- V(WasmCapiFunctionData) \
V(WasmIndirectFunctionTable) \
V(WasmInstanceObject) \
V(WasmStruct) \
diff --git a/deps/v8/src/heap/paged-spaces.cc b/deps/v8/src/heap/paged-spaces.cc
index 5ab30e3aa8..ff6b390ccf 100644
--- a/deps/v8/src/heap/paged-spaces.cc
+++ b/deps/v8/src/heap/paged-spaces.cc
@@ -435,10 +435,9 @@ void PagedSpace::FreeLinearAllocationArea() {
MemoryChunk::FromAddress(current_top));
}
- DCHECK_IMPLIES(
- current_limit - current_top >= 2 * kTaggedSize,
- heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
- HeapObject::FromAddress(current_top)));
+ DCHECK_IMPLIES(current_limit - current_top >= 2 * kTaggedSize,
+ heap()->incremental_marking()->marking_state()->IsWhite(
+ HeapObject::FromAddress(current_top)));
Free(current_top, current_limit - current_top,
SpaceAccountingMode::kSpaceAccounted);
}
@@ -844,6 +843,18 @@ bool CompactionSpace::RefillLabMain(int size_in_bytes,
return RawRefillLabMain(size_in_bytes, origin);
}
+bool PagedSpace::TryExpand(int size_in_bytes, AllocationOrigin origin) {
+ Page* page = Expand();
+ if (!page) return false;
+ if (!is_compaction_space()) {
+ heap()->NotifyOldGenerationExpansion(identity(), page);
+ }
+ DCHECK((CountTotalPages() > 1) ||
+ (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
+ return TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
+ origin);
+}
+
bool PagedSpace::RawRefillLabMain(int size_in_bytes, AllocationOrigin origin) {
// Non-compaction local spaces are not supported.
DCHECK_IMPLIES(is_local_space(), is_compaction_space());
@@ -886,33 +897,22 @@ bool PagedSpace::RawRefillLabMain(int size_in_bytes, AllocationOrigin origin) {
if (heap()->ShouldExpandOldGenerationOnSlowAllocation() &&
heap()->CanExpandOldGeneration(AreaSize())) {
- Page* page = Expand();
- if (page) {
- if (!is_compaction_space()) {
- heap()->NotifyOldGenerationExpansion(identity(), page);
- }
- DCHECK((CountTotalPages() > 1) ||
- (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
- return TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
- origin);
+ if (TryExpand(size_in_bytes, origin)) {
+ return true;
}
}
- if (is_compaction_space()) {
- return ContributeToSweepingMain(0, 0, size_in_bytes, origin);
-
- } else {
- DCHECK(!is_local_space());
- if (collector->sweeping_in_progress()) {
- // Complete sweeping for this space.
- collector->DrainSweepingWorklistForSpace(identity());
- RefillFreeList();
+ // Try sweeping all pages.
+ if (ContributeToSweepingMain(0, 0, size_in_bytes, origin)) {
+ return true;
+ }
- // Last try to acquire memory from free list.
- return TryAllocationFromFreeListMain(size_in_bytes, origin);
- }
- return false;
+ if (heap()->gc_state() != Heap::NOT_IN_GC && !heap()->force_oom()) {
+ // Avoid OOM crash in the GC in order to invoke NearHeapLimitCallback after
+ // GC and give it a chance to increase the heap limit.
+ return TryExpand(size_in_bytes, origin);
}
+ return false;
}
bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
@@ -926,12 +926,11 @@ bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
- int max_freed = collector->sweeper()->ParallelSweepSpace(
- identity(), required_freed_bytes, max_pages,
- invalidated_slots_in_free_space);
+ collector->sweeper()->ParallelSweepSpace(identity(), required_freed_bytes,
+ max_pages,
+ invalidated_slots_in_free_space);
RefillFreeList();
- if (max_freed >= size_in_bytes)
- return TryAllocationFromFreeListMain(size_in_bytes, origin);
+ return TryAllocationFromFreeListMain(size_in_bytes, origin);
}
return false;
}
diff --git a/deps/v8/src/heap/paged-spaces.h b/deps/v8/src/heap/paged-spaces.h
index 198f12e103..97670517cd 100644
--- a/deps/v8/src/heap/paged-spaces.h
+++ b/deps/v8/src/heap/paged-spaces.h
@@ -386,6 +386,9 @@ class V8_EXPORT_PRIVATE PagedSpace
AllocationAlignment alignment,
AllocationOrigin origin);
+ V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
+ AllocationOrigin origin);
+
Executability executable_;
LocalSpaceKind local_space_kind_;
@@ -512,7 +515,8 @@ class MapSpace : public PagedSpace {
public:
// Creates a map space object.
explicit MapSpace(Heap* heap)
- : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, new FreeListMap()) {}
+ : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE,
+ FreeList::CreateFreeList()) {}
int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo(Map::kSize)) {
diff --git a/deps/v8/src/heap/parallel-work-item.h b/deps/v8/src/heap/parallel-work-item.h
new file mode 100644
index 0000000000..9f58d30519
--- /dev/null
+++ b/deps/v8/src/heap/parallel-work-item.h
@@ -0,0 +1,32 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_PARALLEL_WORK_ITEM_H_
+#define V8_HEAP_PARALLEL_WORK_ITEM_H_
+
+#include <atomic>
+
+namespace v8 {
+namespace internal {
+
+class ParallelWorkItem {
+ public:
+ ParallelWorkItem() = default;
+
+ bool TryAcquire() {
+ // memory_order_relaxed is sufficient as the work item's state itself hasn't
+ // been modified since the beginning of its associated job. This is only
+ // atomically acquiring the right to work on it.
+ return reinterpret_cast<std::atomic<bool>*>(&acquire_)->exchange(
+ true, std::memory_order_relaxed) == false;
+ }
+
+ private:
+ bool acquire_{false};
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_PARALLEL_WORK_ITEM_H_
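
The relaxed exchange above is the entire synchronization story for claiming a work item: only the worker that flips the flag from false to true gets to process it. A stand-alone C++ illustration of the same claim pattern (Item and the worker lambda are invented for the example; this is not V8 code):

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

struct Item {
  std::atomic<bool> acquired{false};
  bool TryAcquire() {
    // Only the transition false -> true wins; exchange returns the old value.
    return acquired.exchange(true, std::memory_order_relaxed) == false;
  }
};

int main() {
  std::vector<Item> items(100);
  std::atomic<int> processed{0};
  auto worker = [&] {
    for (auto& item : items) {
      if (item.TryAcquire()) processed.fetch_add(1, std::memory_order_relaxed);
    }
  };
  std::thread t1(worker), t2(worker);
  t1.join();
  t2.join();
  std::printf("processed %d items\n", processed.load());  // always exactly 100
  return 0;
}
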
diff --git a/deps/v8/src/heap/read-only-heap-inl.h b/deps/v8/src/heap/read-only-heap-inl.h
index d8358c5812..316f455013 100644
--- a/deps/v8/src/heap/read-only-heap-inl.h
+++ b/deps/v8/src/heap/read-only-heap-inl.h
@@ -15,8 +15,8 @@ namespace internal {
// static
ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
#ifdef V8_COMPRESS_POINTERS
- const Isolate* isolate = GetIsolateForPtrCompr(object);
- return ReadOnlyRoots(const_cast<Isolate*>(isolate));
+ IsolateRoot isolate = GetIsolateForPtrCompr(object);
+ return ReadOnlyRoots(Isolate::FromRootAddress(isolate.address()));
#else
#ifdef V8_SHARED_RO_HEAP
// This fails if we are creating heap objects and the roots haven't yet been
diff --git a/deps/v8/src/heap/read-only-heap.cc b/deps/v8/src/heap/read-only-heap.cc
index 590b94bc0b..342ad1d031 100644
--- a/deps/v8/src/heap/read-only-heap.cc
+++ b/deps/v8/src/heap/read-only-heap.cc
@@ -60,21 +60,24 @@ bool ReadOnlyHeap::IsSharedMemoryAvailable() {
SoleReadOnlyHeap* SoleReadOnlyHeap::shared_ro_heap_ = nullptr;
// static
-void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
+void ReadOnlyHeap::SetUp(Isolate* isolate,
+ SnapshotData* read_only_snapshot_data,
+ bool can_rehash) {
DCHECK_NOT_NULL(isolate);
if (IsReadOnlySpaceShared()) {
ReadOnlyHeap* ro_heap;
- if (des != nullptr) {
+ if (read_only_snapshot_data != nullptr) {
bool read_only_heap_created = false;
base::MutexGuard guard(read_only_heap_creation_mutex_.Pointer());
std::shared_ptr<ReadOnlyArtifacts> artifacts =
read_only_artifacts_.Get().lock();
if (!artifacts) {
artifacts = InitializeSharedReadOnlyArtifacts();
- artifacts->InitializeChecksum(des);
+ artifacts->InitializeChecksum(read_only_snapshot_data);
ro_heap = CreateInitalHeapForBootstrapping(isolate, artifacts);
- ro_heap->DeseralizeIntoIsolate(isolate, des);
+ ro_heap->DeseralizeIntoIsolate(isolate, read_only_snapshot_data,
+ can_rehash);
read_only_heap_created = true;
} else {
// With pointer compression, there is one ReadOnlyHeap per Isolate.
@@ -82,7 +85,8 @@ void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
ro_heap = artifacts->GetReadOnlyHeapForIsolate(isolate);
isolate->SetUpFromReadOnlyArtifacts(artifacts, ro_heap);
}
- artifacts->VerifyChecksum(des, read_only_heap_created);
+ artifacts->VerifyChecksum(read_only_snapshot_data,
+ read_only_heap_created);
ro_heap->InitializeIsolateRoots(isolate);
} else {
// This path should only be taken in mksnapshot, should only be run once
@@ -94,21 +98,24 @@ void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
artifacts = InitializeSharedReadOnlyArtifacts();
ro_heap = CreateInitalHeapForBootstrapping(isolate, artifacts);
- artifacts->VerifyChecksum(des, true);
+ artifacts->VerifyChecksum(read_only_snapshot_data, true);
}
} else {
auto* ro_heap = new ReadOnlyHeap(new ReadOnlySpace(isolate->heap()));
isolate->SetUpFromReadOnlyArtifacts(nullptr, ro_heap);
- if (des != nullptr) {
- ro_heap->DeseralizeIntoIsolate(isolate, des);
+ if (read_only_snapshot_data != nullptr) {
+ ro_heap->DeseralizeIntoIsolate(isolate, read_only_snapshot_data,
+ can_rehash);
}
}
}
void ReadOnlyHeap::DeseralizeIntoIsolate(Isolate* isolate,
- ReadOnlyDeserializer* des) {
- DCHECK_NOT_NULL(des);
- des->DeserializeInto(isolate);
+ SnapshotData* read_only_snapshot_data,
+ bool can_rehash) {
+ DCHECK_NOT_NULL(read_only_snapshot_data);
+ ReadOnlyDeserializer des(isolate, read_only_snapshot_data, can_rehash);
+ des.DeserializeIntoIsolate();
InitFromIsolate(isolate);
}
@@ -212,7 +219,11 @@ void ReadOnlyHeap::PopulateReadOnlySpaceStatistics(
// static
bool ReadOnlyHeap::Contains(Address address) {
- return BasicMemoryChunk::FromAddress(address)->InReadOnlySpace();
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ return third_party_heap::Heap::InReadOnlySpace(address);
+ } else {
+ return BasicMemoryChunk::FromAddress(address)->InReadOnlySpace();
+ }
}
// static
diff --git a/deps/v8/src/heap/read-only-heap.h b/deps/v8/src/heap/read-only-heap.h
index cc62b71e5b..aea6c8d0a7 100644
--- a/deps/v8/src/heap/read-only-heap.h
+++ b/deps/v8/src/heap/read-only-heap.h
@@ -25,10 +25,10 @@ class BasicMemoryChunk;
class Isolate;
class Page;
class ReadOnlyArtifacts;
-class ReadOnlyDeserializer;
class ReadOnlyPage;
class ReadOnlySpace;
class SharedReadOnlySpace;
+class SnapshotData;
// This class transparently manages read-only space, roots and cache creation
// and destruction.
@@ -47,7 +47,8 @@ class ReadOnlyHeap {
// V8_SHARED_RO_HEAP is enabled, a lock will be held until that method is
// called.
// TODO(v8:7464): Ideally we'd create this without needing a heap.
- static void SetUp(Isolate* isolate, ReadOnlyDeserializer* des);
+ static void SetUp(Isolate* isolate, SnapshotData* read_only_snapshot_data,
+ bool can_rehash);
// Indicates that the isolate has been set up and all read-only space objects
// have been created and will not be written to. This should only be called if
// a deserializer was not previously provided to Setup. When V8_SHARED_RO_HEAP
@@ -101,7 +102,9 @@ class ReadOnlyHeap {
Isolate* isolate, std::shared_ptr<ReadOnlyArtifacts> artifacts);
// Runs the read-only deserializer and calls InitFromIsolate to complete
// read-only heap initialization.
- void DeseralizeIntoIsolate(Isolate* isolate, ReadOnlyDeserializer* des);
+ void DeseralizeIntoIsolate(Isolate* isolate,
+ SnapshotData* read_only_snapshot_data,
+ bool can_rehash);
// Initializes read-only heap from an already set-up isolate, copying
// read-only roots from the isolate. This then seals the space off from
// further writes, marks it as read-only and detaches it from the heap
diff --git a/deps/v8/src/heap/read-only-spaces.cc b/deps/v8/src/heap/read-only-spaces.cc
index 1ceee90a50..b54bfc0389 100644
--- a/deps/v8/src/heap/read-only-spaces.cc
+++ b/deps/v8/src/heap/read-only-spaces.cc
@@ -28,7 +28,7 @@ namespace v8 {
namespace internal {
void CopyAndRebaseRoots(Address* src, Address* dst, Address new_base) {
- Address src_base = GetIsolateRoot(src[0]);
+ Address src_base = GetIsolateRootAddress(src[0]);
for (size_t i = 0; i < ReadOnlyHeap::kEntriesCount; ++i) {
dst[i] = src[i] - src_base + new_base;
}
@@ -39,22 +39,24 @@ void ReadOnlyArtifacts::set_read_only_heap(
read_only_heap_ = std::move(read_only_heap);
}
-void ReadOnlyArtifacts::InitializeChecksum(ReadOnlyDeserializer* des) {
+void ReadOnlyArtifacts::InitializeChecksum(
+ SnapshotData* read_only_snapshot_data) {
#ifdef DEBUG
- read_only_blob_checksum_ = des->GetChecksum();
+ read_only_blob_checksum_ = Checksum(read_only_snapshot_data->Payload());
#endif // DEBUG
}
-void ReadOnlyArtifacts::VerifyChecksum(ReadOnlyDeserializer* des,
+void ReadOnlyArtifacts::VerifyChecksum(SnapshotData* read_only_snapshot_data,
bool read_only_heap_created) {
#ifdef DEBUG
if (read_only_blob_checksum_) {
    // The read-only heap was set up from a snapshot. Make sure it's always
// the same snapshot.
- CHECK_WITH_MSG(des->GetChecksum(),
+ uint32_t snapshot_checksum = Checksum(read_only_snapshot_data->Payload());
+ CHECK_WITH_MSG(snapshot_checksum,
"Attempt to create the read-only heap after already "
"creating from a snapshot.");
- CHECK_EQ(read_only_blob_checksum_, des->GetChecksum());
+ CHECK_EQ(read_only_blob_checksum_, snapshot_checksum);
} else {
// If there's no checksum, then that means the read-only heap objects are
// being created.
@@ -113,7 +115,7 @@ void PointerCompressedReadOnlyArtifacts::InitializeRootsIn(Isolate* isolate) {
auto isolate_ro_roots =
isolate->roots_table().read_only_roots_begin().location();
CopyAndRebaseRoots(read_only_roots_, isolate_ro_roots,
- GetIsolateRoot(isolate));
+ isolate->isolate_root());
}
SharedReadOnlySpace* PointerCompressedReadOnlyArtifacts::CreateReadOnlySpace(
@@ -123,7 +125,7 @@ SharedReadOnlySpace* PointerCompressedReadOnlyArtifacts::CreateReadOnlySpace(
std::vector<std::unique_ptr<v8::PageAllocator::SharedMemoryMapping>> mappings;
std::vector<ReadOnlyPage*> pages;
- Address isolate_root = GetIsolateRoot(isolate);
+ Address isolate_root = isolate->isolate_root();
for (size_t i = 0; i < pages_.size(); ++i) {
const ReadOnlyPage* page = pages_[i];
const Tagged_t offset = OffsetForPage(i);
@@ -167,7 +169,7 @@ ReadOnlyHeap* PointerCompressedReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
// ReadOnlyArtifacts and be decompressed on the fly.
auto original_cache = read_only_heap_->read_only_object_cache_;
auto& cache = read_only_heap->read_only_object_cache_;
- Address isolate_root = GetIsolateRoot(isolate);
+ Address isolate_root = isolate->isolate_root();
for (Object original_object : original_cache) {
Address original_address = original_object.ptr();
Address new_address = isolate_root + CompressTagged(original_address);
@@ -720,6 +722,7 @@ size_t ReadOnlyPage::ShrinkToHighWaterMark() {
}
void ReadOnlySpace::ShrinkPages() {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return;
BasicMemoryChunk::UpdateHighWaterMark(top_);
heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_),
ClearRecordedSlots::kNo);
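
Since the deserializer no longer carries the checksum itself, the artifacts now hash the raw snapshot payload directly. A simplified stand-alone sketch of that debug-only handshake; PayloadChecksum (FNV-1a here) and SharedArtifacts are placeholders, not V8's actual checksum or types:

#include <cassert>
#include <cstdint>
#include <vector>

uint32_t PayloadChecksum(const std::vector<uint8_t>& payload) {
  uint32_t sum = 2166136261u;  // 32-bit FNV-1a, purely illustrative
  for (uint8_t byte : payload) sum = (sum ^ byte) * 16777619u;
  return sum;
}

struct SharedArtifacts {
  uint32_t recorded_checksum = 0;

  // First isolate: remember what the read-only snapshot looked like.
  void InitializeChecksum(const std::vector<uint8_t>& payload) {
    recorded_checksum = PayloadChecksum(payload);
  }
  // Later isolates attaching to the shared artifacts must match it.
  void VerifyChecksum(const std::vector<uint8_t>& payload) const {
    assert(recorded_checksum == PayloadChecksum(payload) &&
           "read-only snapshot differs between isolates");
  }
};
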
diff --git a/deps/v8/src/heap/read-only-spaces.h b/deps/v8/src/heap/read-only-spaces.h
index 2bdf09d0ab..ffadcb55b3 100644
--- a/deps/v8/src/heap/read-only-spaces.h
+++ b/deps/v8/src/heap/read-only-spaces.h
@@ -20,9 +20,9 @@
namespace v8 {
namespace internal {
-class ReadOnlyDeserializer;
class MemoryAllocator;
class ReadOnlyHeap;
+class SnapshotData;
class ReadOnlyPage : public BasicMemoryChunk {
public:
@@ -100,8 +100,9 @@ class ReadOnlyArtifacts {
void set_read_only_heap(std::unique_ptr<ReadOnlyHeap> read_only_heap);
ReadOnlyHeap* read_only_heap() const { return read_only_heap_.get(); }
- void InitializeChecksum(ReadOnlyDeserializer* des);
- void VerifyChecksum(ReadOnlyDeserializer* des, bool read_only_heap_created);
+ void InitializeChecksum(SnapshotData* read_only_snapshot_data);
+ void VerifyChecksum(SnapshotData* read_only_snapshot_data,
+ bool read_only_heap_created);
protected:
ReadOnlyArtifacts() = default;
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
index a306fbde08..2550851b30 100644
--- a/deps/v8/src/heap/safepoint.cc
+++ b/deps/v8/src/heap/safepoint.cc
@@ -21,7 +21,7 @@ void GlobalSafepoint::EnterSafepointScope() {
if (++active_safepoint_scopes_ > 1) return;
- TimedHistogramScope timer(heap_->isolate()->counters()->time_to_safepoint());
+ TimedHistogramScope timer(heap_->isolate()->counters()->stop_the_world());
TRACE_GC(heap_->tracer(), GCTracer::Scope::STOP_THE_WORLD);
local_heaps_mutex_.Lock();
@@ -40,8 +40,10 @@ void GlobalSafepoint::EnterSafepointScope() {
for (LocalHeap* current = local_heaps_head_; current;
current = current->next_) {
if (current == local_heap_of_this_thread_) {
+ DCHECK(current->is_main_thread());
continue;
}
+ DCHECK(!current->is_main_thread());
current->state_mutex_.Lock();
while (current->state_ == LocalHeap::ThreadState::Running) {
@@ -114,23 +116,6 @@ SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
SafepointScope::~SafepointScope() { safepoint_->LeaveSafepointScope(); }
-void GlobalSafepoint::AddLocalHeap(LocalHeap* local_heap) {
- base::MutexGuard guard(&local_heaps_mutex_);
- if (local_heaps_head_) local_heaps_head_->prev_ = local_heap;
- local_heap->prev_ = nullptr;
- local_heap->next_ = local_heaps_head_;
- local_heaps_head_ = local_heap;
-}
-
-void GlobalSafepoint::RemoveLocalHeap(LocalHeap* local_heap) {
- base::MutexGuard guard(&local_heaps_mutex_);
- if (local_heap->next_) local_heap->next_->prev_ = local_heap->prev_;
- if (local_heap->prev_)
- local_heap->prev_->next_ = local_heap->next_;
- else
- local_heaps_head_ = local_heap->next_;
-}
-
bool GlobalSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
base::MutexGuard guard(&local_heaps_mutex_);
LocalHeap* current = local_heaps_head_;
diff --git a/deps/v8/src/heap/safepoint.h b/deps/v8/src/heap/safepoint.h
index efe499ea13..dd2bb421be 100644
--- a/deps/v8/src/heap/safepoint.h
+++ b/deps/v8/src/heap/safepoint.h
@@ -62,8 +62,36 @@ class GlobalSafepoint {
void EnterSafepointScope();
void LeaveSafepointScope();
- void AddLocalHeap(LocalHeap* local_heap);
- void RemoveLocalHeap(LocalHeap* local_heap);
+ template <typename Callback>
+ void AddLocalHeap(LocalHeap* local_heap, Callback callback) {
+ // Safepoint holds this lock in order to stop threads from starting or
+ // stopping.
+ base::MutexGuard guard(&local_heaps_mutex_);
+
+ // Additional code protected from safepoint
+ callback();
+
+    // Add the local heap to the doubly-linked list
+ if (local_heaps_head_) local_heaps_head_->prev_ = local_heap;
+ local_heap->prev_ = nullptr;
+ local_heap->next_ = local_heaps_head_;
+ local_heaps_head_ = local_heap;
+ }
+
+ template <typename Callback>
+ void RemoveLocalHeap(LocalHeap* local_heap, Callback callback) {
+ base::MutexGuard guard(&local_heaps_mutex_);
+
+ // Additional code protected from safepoint
+ callback();
+
+    // Remove the local heap from the doubly-linked list
+ if (local_heap->next_) local_heap->next_->prev_ = local_heap->prev_;
+ if (local_heap->prev_)
+ local_heap->prev_->next_ = local_heap->next_;
+ else
+ local_heaps_head_ = local_heap->next_;
+ }
Barrier barrier_;
Heap* heap_;
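
The point of turning AddLocalHeap/RemoveLocalHeap into templates that take a callback is that the callback now runs under local_heaps_mutex_, i.e. atomically with respect to a safepoint being entered, so a thread can never be observed half-registered. A plain C++ sketch of that shape, with Registry and ThreadNode invented for the example:

#include <mutex>

struct ThreadNode {
  ThreadNode* prev = nullptr;
  ThreadNode* next = nullptr;
};

class Registry {
 public:
  template <typename Callback>
  void Add(ThreadNode* node, Callback callback) {
    std::lock_guard<std::mutex> guard(mutex_);
    callback();  // extra setup protected from a concurrent stop-the-world
    if (head_) head_->prev = node;
    node->prev = nullptr;
    node->next = head_;
    head_ = node;
  }

  template <typename Callback>
  void Remove(ThreadNode* node, Callback callback) {
    std::lock_guard<std::mutex> guard(mutex_);
    callback();  // teardown also happens under the same lock
    if (node->next) node->next->prev = node->prev;
    if (node->prev)
      node->prev->next = node->next;
    else
      head_ = node->next;
  }

 private:
  std::mutex mutex_;
  ThreadNode* head_ = nullptr;
};
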
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 18933a5ac7..8560b5b62b 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -38,6 +38,10 @@ bool Scavenger::PromotionList::View::Pop(struct PromotionListEntry* entry) {
return promotion_list_->Pop(task_id_, entry);
}
+void Scavenger::PromotionList::View::FlushToGlobal() {
+ promotion_list_->FlushToGlobal(task_id_);
+}
+
bool Scavenger::PromotionList::View::IsGlobalPoolEmpty() {
return promotion_list_->IsGlobalPoolEmpty();
}
@@ -78,6 +82,16 @@ bool Scavenger::PromotionList::Pop(int task_id,
return large_object_promotion_list_.Pop(task_id, entry);
}
+void Scavenger::PromotionList::FlushToGlobal(int task_id) {
+ regular_object_promotion_list_.FlushToGlobal(task_id);
+ large_object_promotion_list_.FlushToGlobal(task_id);
+}
+
+size_t Scavenger::PromotionList::GlobalPoolSize() const {
+ return regular_object_promotion_list_.GlobalPoolSize() +
+ large_object_promotion_list_.GlobalPoolSize();
+}
+
bool Scavenger::PromotionList::IsGlobalPoolEmpty() {
return regular_object_promotion_list_.IsGlobalPoolEmpty() &&
large_object_promotion_list_.IsGlobalPoolEmpty();
@@ -109,7 +123,7 @@ bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
heap()->CopyBlock(target.address() + kTaggedSize,
source.address() + kTaggedSize, size - kTaggedSize);
- if (!source.synchronized_compare_and_swap_map_word(
+ if (!source.release_compare_and_swap_map_word(
MapWord::FromMap(map), MapWord::FromForwardingAddress(target))) {
// Other task migrated the object.
return false;
@@ -214,7 +228,7 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
BasicMemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner_identity());
- if (object.synchronized_compare_and_swap_map_word(
+ if (object.release_compare_and_swap_map_word(
MapWord::FromMap(map), MapWord::FromForwardingAddress(object))) {
surviving_new_large_objects_.insert({object, map});
promoted_size_ += object_size;
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index f51a385085..ea4cb90459 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -25,65 +25,6 @@
namespace v8 {
namespace internal {
-class PageScavengingItem final : public ItemParallelJob::Item {
- public:
- explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
- ~PageScavengingItem() override = default;
-
- void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }
-
- private:
- MemoryChunk* const chunk_;
-};
-
-class ScavengingTask final : public ItemParallelJob::Task {
- public:
- ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
- : ItemParallelJob::Task(heap->isolate()),
- heap_(heap),
- scavenger_(scavenger),
- barrier_(barrier) {}
-
- void RunInParallel(Runner runner) final {
- if (runner == Runner::kForeground) {
- TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
- ProcessItems();
- } else {
- TRACE_BACKGROUND_GC(
- heap_->tracer(),
- GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
- ProcessItems();
- }
- }
-
- private:
- void ProcessItems() {
- double scavenging_time = 0.0;
- {
- barrier_->Start();
- TimedScope scope(&scavenging_time);
- PageScavengingItem* item = nullptr;
- while ((item = GetItem<PageScavengingItem>()) != nullptr) {
- item->Process(scavenger_);
- item->MarkFinished();
- }
- do {
- scavenger_->Process(barrier_);
- } while (!barrier_->Wait());
- scavenger_->Process();
- }
- if (FLAG_trace_parallel_scavenge) {
- PrintIsolate(heap_->isolate(),
- "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
- static_cast<void*>(this), scavenging_time,
- scavenger_->bytes_copied(), scavenger_->bytes_promoted());
- }
- }
- Heap* const heap_;
- Scavenger* const scavenger_;
- OneshotBarrier* const barrier_;
-};
-
class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
public:
IterateAndScavengePromotedObjectsVisitor(Scavenger* scavenger,
@@ -219,8 +160,81 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
}
};
+ScavengerCollector::JobTask::JobTask(
+ ScavengerCollector* outer,
+ std::vector<std::unique_ptr<Scavenger>>* scavengers,
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks,
+ Scavenger::CopiedList* copied_list,
+ Scavenger::PromotionList* promotion_list)
+ : outer_(outer),
+ scavengers_(scavengers),
+ memory_chunks_(std::move(memory_chunks)),
+ remaining_memory_chunks_(memory_chunks_.size()),
+ generator_(memory_chunks_.size()),
+ copied_list_(copied_list),
+ promotion_list_(promotion_list) {}
+
+void ScavengerCollector::JobTask::Run(JobDelegate* delegate) {
+ DCHECK_LT(delegate->GetTaskId(), scavengers_->size());
+ Scavenger* scavenger = (*scavengers_)[delegate->GetTaskId()].get();
+ if (delegate->IsJoiningThread()) {
+ TRACE_GC(outer_->heap_->tracer(),
+ GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
+ ProcessItems(delegate, scavenger);
+ } else {
+ TRACE_BACKGROUND_GC(
+ outer_->heap_->tracer(),
+ GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
+ ProcessItems(delegate, scavenger);
+ }
+}
+
+size_t ScavengerCollector::JobTask::GetMaxConcurrency(
+ size_t worker_count) const {
+ // We need to account for local segments held by worker_count in addition to
+ // GlobalPoolSize() of copied_list_ and promotion_list_.
+ return std::min<size_t>(
+ scavengers_->size(),
+ std::max<size_t>(remaining_memory_chunks_.load(std::memory_order_relaxed),
+ worker_count + copied_list_->GlobalPoolSize() +
+ promotion_list_->GlobalPoolSize()));
+}
+
+void ScavengerCollector::JobTask::ProcessItems(JobDelegate* delegate,
+ Scavenger* scavenger) {
+ double scavenging_time = 0.0;
+ {
+ TimedScope scope(&scavenging_time);
+ ConcurrentScavengePages(scavenger);
+ scavenger->Process(delegate);
+ }
+ if (FLAG_trace_parallel_scavenge) {
+ PrintIsolate(outer_->heap_->isolate(),
+ "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
+ static_cast<void*>(this), scavenging_time,
+ scavenger->bytes_copied(), scavenger->bytes_promoted());
+ }
+}
+
+void ScavengerCollector::JobTask::ConcurrentScavengePages(
+ Scavenger* scavenger) {
+ while (remaining_memory_chunks_.load(std::memory_order_relaxed) > 0) {
+ base::Optional<size_t> index = generator_.GetNext();
+ if (!index) return;
+ for (size_t i = *index; i < memory_chunks_.size(); ++i) {
+ auto& work_item = memory_chunks_[i];
+ if (!work_item.first.TryAcquire()) break;
+ scavenger->ScavengePage(work_item.second);
+ if (remaining_memory_chunks_.fetch_sub(1, std::memory_order_relaxed) <=
+ 1) {
+ return;
+ }
+ }
+ }
+}
+
ScavengerCollector::ScavengerCollector(Heap* heap)
- : isolate_(heap->isolate()), heap_(heap), parallel_scavenge_semaphore_(0) {}
+ : isolate_(heap->isolate()), heap_(heap) {}
// Remove this crashkey after chromium:1010312 is fixed.
class ScopedFullHeapCrashKey {
@@ -246,23 +260,12 @@ void ScavengerCollector::CollectGarbage() {
}
DCHECK(surviving_new_large_objects_.empty());
- ItemParallelJob job(isolate_->cancelable_task_manager(),
- &parallel_scavenge_semaphore_);
- const int kMainThreadId = 0;
- Scavenger* scavengers[kMaxScavengerTasks];
- const bool is_logging = isolate_->LogObjectRelocation();
- const int num_scavenge_tasks = NumberOfScavengeTasks();
- OneshotBarrier barrier(base::TimeDelta::FromMilliseconds(kMaxWaitTimeMs));
+ std::vector<std::unique_ptr<Scavenger>> scavengers;
Worklist<MemoryChunk*, 64> empty_chunks;
+ const int num_scavenge_tasks = NumberOfScavengeTasks();
Scavenger::CopiedList copied_list(num_scavenge_tasks);
Scavenger::PromotionList promotion_list(num_scavenge_tasks);
EphemeronTableList ephemeron_table_list(num_scavenge_tasks);
- for (int i = 0; i < num_scavenge_tasks; i++) {
- scavengers[i] =
- new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
- &promotion_list, &ephemeron_table_list, i);
- job.AddTask(new ScavengingTask(heap_, scavengers[i], &barrier));
- }
{
Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();
@@ -289,12 +292,20 @@ void ScavengerCollector::CollectGarbage() {
return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
});
+ const bool is_logging = isolate_->LogObjectRelocation();
+ for (int i = 0; i < num_scavenge_tasks; ++i) {
+ scavengers.emplace_back(
+ new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
+ &promotion_list, &ephemeron_table_list, i));
+ }
+
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks;
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- heap_, [&job](MemoryChunk* chunk) {
- job.AddItem(new PageScavengingItem(chunk));
+ heap_, [&memory_chunks](MemoryChunk* chunk) {
+ memory_chunks.emplace_back(ParallelWorkItem{}, chunk);
});
- RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId]);
+ RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId].get());
{
// Identify weak unmodified handles. Requires an unmodified graph.
@@ -319,18 +330,24 @@ void ScavengerCollector::CollectGarbage() {
heap_->IterateRoots(&root_scavenge_visitor, options);
isolate_->global_handles()->IterateYoungStrongAndDependentRoots(
&root_scavenge_visitor);
+ scavengers[kMainThreadId]->Flush();
}
{
// Parallel phase scavenging all copied and promoted objects.
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
- job.Run();
+ V8::GetCurrentPlatform()
+ ->PostJob(v8::TaskPriority::kUserBlocking,
+ std::make_unique<JobTask>(this, &scavengers,
+ std::move(memory_chunks),
+ &copied_list, &promotion_list))
+ ->Join();
DCHECK(copied_list.IsEmpty());
DCHECK(promotion_list.IsEmpty());
}
if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
- IterateStackAndScavenge(&root_scavenge_visitor, scavengers,
- num_scavenge_tasks, kMainThreadId);
+ IterateStackAndScavenge(&root_scavenge_visitor, &scavengers,
+ kMainThreadId);
DCHECK(copied_list.IsEmpty());
DCHECK(promotion_list.IsEmpty());
}
@@ -357,10 +374,10 @@ void ScavengerCollector::CollectGarbage() {
DCHECK(surviving_new_large_objects_.empty());
- for (int i = 0; i < num_scavenge_tasks; i++) {
- scavengers[i]->Finalize();
- delete scavengers[i];
+ for (auto& scavenger : scavengers) {
+ scavenger->Finalize();
}
+ scavengers.clear();
HandleSurvivingNewLargeObjects();
}
@@ -420,23 +437,24 @@ void ScavengerCollector::CollectGarbage() {
}
void ScavengerCollector::IterateStackAndScavenge(
- RootScavengeVisitor* root_scavenge_visitor, Scavenger** scavengers,
- int num_scavenge_tasks, int main_thread_id) {
+
+ RootScavengeVisitor* root_scavenge_visitor,
+ std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id) {
// Scan the stack, scavenge the newly discovered objects, and report
  // the survival statistics before and after the stack scanning.
// This code is not intended for production.
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_STACK_ROOTS);
size_t survived_bytes_before = 0;
- for (int i = 0; i < num_scavenge_tasks; i++) {
+ for (auto& scavenger : *scavengers) {
survived_bytes_before +=
- scavengers[i]->bytes_copied() + scavengers[i]->bytes_promoted();
+ scavenger->bytes_copied() + scavenger->bytes_promoted();
}
heap_->IterateStackRoots(root_scavenge_visitor);
- scavengers[main_thread_id]->Process();
+ (*scavengers)[main_thread_id]->Process();
size_t survived_bytes_after = 0;
- for (int i = 0; i < num_scavenge_tasks; i++) {
+ for (auto& scavenger : *scavengers) {
survived_bytes_after +=
- scavengers[i]->bytes_copied() + scavengers[i]->bytes_promoted();
+ scavenger->bytes_copied() + scavenger->bytes_promoted();
}
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"V8.GCScavengerStackScanning", "survived_bytes_before",
@@ -590,10 +608,9 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
AddPageToSweeperIfNecessary(page);
}
-void Scavenger::Process(OneshotBarrier* barrier) {
+void Scavenger::Process(JobDelegate* delegate) {
ScavengeVisitor scavenge_visitor(this);
- const bool have_barrier = barrier != nullptr;
bool done;
size_t objects = 0;
do {
@@ -603,9 +620,9 @@ void Scavenger::Process(OneshotBarrier* barrier) {
copied_list_.Pop(&object_and_size)) {
scavenge_visitor.Visit(object_and_size.first);
done = false;
- if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
+ if (delegate && ((++objects % kInterruptThreshold) == 0)) {
if (!copied_list_.IsGlobalPoolEmpty()) {
- barrier->NotifyAll();
+ delegate->NotifyConcurrencyIncrease();
}
}
}
@@ -615,9 +632,9 @@ void Scavenger::Process(OneshotBarrier* barrier) {
HeapObject target = entry.heap_object;
IterateAndScavengePromotedObject(target, entry.map, entry.size);
done = false;
- if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
+ if (delegate && ((++objects % kInterruptThreshold) == 0)) {
if (!promotion_list_.IsGlobalPoolEmpty()) {
- barrier->NotifyAll();
+ delegate->NotifyConcurrencyIncrease();
}
}
}
@@ -705,6 +722,11 @@ void Scavenger::Finalize() {
}
}
+void Scavenger::Flush() {
+ copied_list_.FlushToGlobal();
+ promotion_list_.FlushToGlobal();
+}
+
void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
ephemeron_table_list_.Push(table);
}
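
The GetMaxConcurrency override above folds three quantities into one bound: unscavenged pages (memory chunks), globally visible worklist segments, and one private segment per active worker, clamped to the number of pre-created scavengers. The arithmetic, extracted into a stand-alone helper purely for illustration (names are made up):

#include <algorithm>
#include <cstddef>

size_t MaxConcurrency(size_t worker_count, size_t remaining_chunks,
                      size_t global_copied, size_t global_promoted,
                      size_t num_scavengers) {
  // Each running worker may hold a local segment not reflected in the global
  // pool sizes, so worker_count is added before taking the maximum.
  size_t outstanding = std::max(
      remaining_chunks, worker_count + global_copied + global_promoted);
  return std::min(num_scavengers, outstanding);
}
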
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index d96219fd51..481ec4d558 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -6,8 +6,10 @@
#define V8_HEAP_SCAVENGER_H_
#include "src/base/platform/condition-variable.h"
+#include "src/heap/index-generator.h"
#include "src/heap/local-allocator.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/parallel-work-item.h"
#include "src/heap/slot-set.h"
#include "src/heap/worklist.h"
@@ -33,38 +35,7 @@ constexpr int kEphemeronTableListSegmentSize = 128;
using EphemeronTableList =
Worklist<EphemeronHashTable, kEphemeronTableListSegmentSize>;
-class ScavengerCollector {
- public:
- static const int kMaxScavengerTasks = 8;
- static const int kMaxWaitTimeMs = 2;
-
- explicit ScavengerCollector(Heap* heap);
-
- void CollectGarbage();
-
- private:
- void MergeSurvivingNewLargeObjects(
- const SurvivingNewLargeObjectsMap& objects);
-
- int NumberOfScavengeTasks();
-
- void ProcessWeakReferences(EphemeronTableList* ephemeron_table_list);
- void ClearYoungEphemerons(EphemeronTableList* ephemeron_table_list);
- void ClearOldEphemerons();
- void HandleSurvivingNewLargeObjects();
-
- void SweepArrayBufferExtensions();
-
- void IterateStackAndScavenge(RootScavengeVisitor* root_scavenge_visitor,
- Scavenger** scavengers, int num_scavenge_tasks,
- int main_thread_id);
- Isolate* const isolate_;
- Heap* const heap_;
- base::Semaphore parallel_scavenge_semaphore_;
- SurvivingNewLargeObjectsMap surviving_new_large_objects_;
-
- friend class Scavenger;
-};
+class ScavengerCollector;
class Scavenger {
public:
@@ -88,6 +59,7 @@ class Scavenger {
inline bool Pop(struct PromotionListEntry* entry);
inline bool IsGlobalPoolEmpty();
inline bool ShouldEagerlyProcessPromotionList();
+ inline void FlushToGlobal();
private:
PromotionList* promotion_list_;
@@ -102,10 +74,12 @@ class Scavenger {
inline void PushLargeObject(int task_id, HeapObject object, Map map,
int size);
inline bool IsEmpty();
+ inline size_t GlobalPoolSize() const;
inline size_t LocalPushSegmentSize(int task_id);
inline bool Pop(int task_id, struct PromotionListEntry* entry);
inline bool IsGlobalPoolEmpty();
inline bool ShouldEagerlyProcessPromotionList(int task_id);
+ inline void FlushToGlobal(int task_id);
private:
static const int kRegularObjectPromotionListSegmentSize = 256;
@@ -134,10 +108,11 @@ class Scavenger {
// Processes remaining work (=objects) after single objects have been
// manually scavenged using ScavengeObject or CheckAndScavengeObject.
- void Process(OneshotBarrier* barrier = nullptr);
+ void Process(JobDelegate* delegate = nullptr);
// Finalize the Scavenger. Needs to be called from the main thread.
void Finalize();
+ void Flush();
void AddEphemeronHashTable(EphemeronHashTable table);
@@ -276,6 +251,66 @@ class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
Scavenger* const scavenger_;
};
+class ScavengerCollector {
+ public:
+ static const int kMaxScavengerTasks = 8;
+ static const int kMainThreadId = 0;
+
+ explicit ScavengerCollector(Heap* heap);
+
+ void CollectGarbage();
+
+ private:
+ class JobTask : public v8::JobTask {
+ public:
+ explicit JobTask(
+ ScavengerCollector* outer,
+ std::vector<std::unique_ptr<Scavenger>>* scavengers,
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks,
+ Scavenger::CopiedList* copied_list,
+ Scavenger::PromotionList* promotion_list);
+
+ void Run(JobDelegate* delegate) override;
+ size_t GetMaxConcurrency(size_t worker_count) const override;
+
+ private:
+ void ProcessItems(JobDelegate* delegate, Scavenger* scavenger);
+ void ConcurrentScavengePages(Scavenger* scavenger);
+
+ ScavengerCollector* outer_;
+
+ std::vector<std::unique_ptr<Scavenger>>* scavengers_;
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks_;
+ std::atomic<size_t> remaining_memory_chunks_{0};
+ IndexGenerator generator_;
+
+ Scavenger::CopiedList* copied_list_;
+ Scavenger::PromotionList* promotion_list_;
+ };
+
+ void MergeSurvivingNewLargeObjects(
+ const SurvivingNewLargeObjectsMap& objects);
+
+ int NumberOfScavengeTasks();
+
+ void ProcessWeakReferences(EphemeronTableList* ephemeron_table_list);
+ void ClearYoungEphemerons(EphemeronTableList* ephemeron_table_list);
+ void ClearOldEphemerons();
+ void HandleSurvivingNewLargeObjects();
+
+ void SweepArrayBufferExtensions();
+
+ void IterateStackAndScavenge(
+ RootScavengeVisitor* root_scavenge_visitor,
+ std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id);
+
+ Isolate* const isolate_;
+ Heap* const heap_;
+ SurvivingNewLargeObjectsMap surviving_new_large_objects_;
+
+ friend class Scavenger;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index b1844256e8..521d1a10c6 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -42,11 +42,9 @@
#include "src/objects/string.h"
#include "src/objects/synthetic-module.h"
#include "src/objects/template-objects-inl.h"
+#include "src/objects/torque-defined-classes-inl.h"
#include "src/regexp/regexp.h"
#include "src/wasm/wasm-objects.h"
-#include "torque-generated/class-definitions.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/internal-class-definitions-inl.h"
namespace v8 {
namespace internal {
@@ -167,7 +165,8 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
map.set_instance_size(instance_size);
// Initialize to only containing tagged fields.
if (FLAG_unbox_double_fields) {
- map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout(),
+ kReleaseStore);
}
// GetVisitorId requires a properly initialized LayoutDescriptor.
map.set_visitor_id(Map::GetVisitorId(map));
@@ -194,7 +193,8 @@ void Heap::FinalizePartialMap(Map map) {
map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
map.SetInstanceDescriptors(isolate(), roots.empty_descriptor_array(), 0);
if (FLAG_unbox_double_fields) {
- map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout(),
+ kReleaseStore);
}
map.set_prototype(roots.null_value());
map.set_constructor_or_backpointer(roots.null_value());
@@ -427,8 +427,11 @@ bool Heap::CreateInitialMaps() {
TORQUE_DEFINED_FIXED_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_MAP);
#undef TORQUE_ALLOCATE_MAP
-#define TORQUE_ALLOCATE_VARSIZE_MAP(NAME, Name, name) \
- ALLOCATE_VARSIZE_MAP(NAME, name)
+#define TORQUE_ALLOCATE_VARSIZE_MAP(NAME, Name, name) \
+ /* The DescriptorArray map is pre-allocated and initialized above. */ \
+ if (NAME != DESCRIPTOR_ARRAY_TYPE) { \
+ ALLOCATE_VARSIZE_MAP(NAME, name) \
+ }
TORQUE_DEFINED_VARSIZE_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_VARSIZE_MAP);
#undef TORQUE_ALLOCATE_VARSIZE_MAP
@@ -842,25 +845,23 @@ void Heap::CreateInitialObjects() {
set_next_template_serial_number(Smi::zero());
// Allocate the empty OrderedHashMap.
- Handle<FixedArray> empty_ordered_hash_map = factory->NewFixedArray(
- OrderedHashMap::HashTableStartIndex(), AllocationType::kReadOnly);
- empty_ordered_hash_map->set_map_no_write_barrier(
- *factory->ordered_hash_map_map());
- for (int i = 0; i < empty_ordered_hash_map->length(); ++i) {
- empty_ordered_hash_map->set(i, Smi::zero());
- }
+ Handle<OrderedHashMap> empty_ordered_hash_map =
+ OrderedHashMap::AllocateEmpty(isolate(), AllocationType::kReadOnly)
+ .ToHandleChecked();
set_empty_ordered_hash_map(*empty_ordered_hash_map);
// Allocate the empty OrderedHashSet.
- Handle<FixedArray> empty_ordered_hash_set = factory->NewFixedArray(
- OrderedHashSet::HashTableStartIndex(), AllocationType::kReadOnly);
- empty_ordered_hash_set->set_map_no_write_barrier(
- *factory->ordered_hash_set_map());
- for (int i = 0; i < empty_ordered_hash_set->length(); ++i) {
- empty_ordered_hash_set->set(i, Smi::zero());
- }
+ Handle<OrderedHashSet> empty_ordered_hash_set =
+ OrderedHashSet::AllocateEmpty(isolate(), AllocationType::kReadOnly)
+ .ToHandleChecked();
set_empty_ordered_hash_set(*empty_ordered_hash_set);
+ // Allocate the empty OrderedNameDictionary
+ Handle<OrderedNameDictionary> empty_ordered_property_dictionary =
+ OrderedNameDictionary::AllocateEmpty(isolate(), AllocationType::kReadOnly)
+ .ToHandleChecked();
+ set_empty_ordered_property_dictionary(*empty_ordered_property_dictionary);
+
// Allocate the empty FeedbackMetadata.
Handle<FeedbackMetadata> empty_feedback_metadata =
factory->NewFeedbackMetadata(0, 0, AllocationType::kReadOnly);
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 8020226c00..7f2d243aec 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -101,8 +101,10 @@ class SemiSpace;
#define DCHECK_OBJECT_SIZE(size) \
DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
-#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
- DCHECK((0 < size) && (size <= code_space->AreaSize()))
+#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
+ DCHECK((0 < size) && \
+ (size <= std::min(MemoryChunkLayout::MaxRegularCodeObjectSize(), \
+ code_space->AreaSize())))
// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces that are not
diff --git a/deps/v8/src/heap/third-party/heap-api.h b/deps/v8/src/heap/third-party/heap-api.h
index 5eaae847a7..c4712b988e 100644
--- a/deps/v8/src/heap/third-party/heap-api.h
+++ b/deps/v8/src/heap/third-party/heap-api.h
@@ -30,8 +30,13 @@ class Heap {
static bool InReadOnlySpace(Address address);
+ static bool InLargeObjectSpace(Address address);
+
static bool IsValidHeapObject(HeapObject object);
+ void ResetIterator();
+ HeapObject NextObject();
+
bool CollectGarbage();
};
diff --git a/deps/v8/src/heap/weak-object-worklists.cc b/deps/v8/src/heap/weak-object-worklists.cc
new file mode 100644
index 0000000000..532739000f
--- /dev/null
+++ b/deps/v8/src/heap/weak-object-worklists.cc
@@ -0,0 +1,172 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/weak-object-worklists.h"
+
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/worklist.h"
+#include "src/objects/hash-table.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/js-function.h"
+#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/js-weak-refs.h"
+#include "src/objects/shared-function-info.h"
+#include "src/objects/transitions.h"
+
+namespace v8 {
+
+namespace internal {
+
+void WeakObjects::UpdateAfterScavenge() {
+#define INVOKE_UPDATE(_, name, Name) Update##Name(name);
+ WEAK_OBJECT_WORKLISTS(INVOKE_UPDATE)
+#undef INVOKE_UPDATE
+}
+
+void WeakObjects::UpdateTransitionArrays(
+ WeakObjectWorklist<TransitionArray>& transition_arrays) {
+ DCHECK(!ContainsYoungObjects(transition_arrays));
+}
+
+void WeakObjects::UpdateEphemeronHashTables(
+ WeakObjectWorklist<EphemeronHashTable>& ephemeron_hash_tables) {
+ ephemeron_hash_tables.Update(
+ [](EphemeronHashTable slot_in, EphemeronHashTable* slot_out) -> bool {
+ EphemeronHashTable forwarded = ForwardingAddress(slot_in);
+
+ if (!forwarded.is_null()) {
+ *slot_out = forwarded;
+ return true;
+ }
+
+ return false;
+ });
+}
+
+namespace {
+bool EphemeronUpdater(Ephemeron slot_in, Ephemeron* slot_out) {
+ HeapObject key = slot_in.key;
+ HeapObject value = slot_in.value;
+ HeapObject forwarded_key = ForwardingAddress(key);
+ HeapObject forwarded_value = ForwardingAddress(value);
+
+ if (!forwarded_key.is_null() && !forwarded_value.is_null()) {
+ *slot_out = Ephemeron{forwarded_key, forwarded_value};
+ return true;
+ }
+
+ return false;
+}
+} // anonymous namespace
+
+void WeakObjects::UpdateCurrentEphemerons(
+ WeakObjectWorklist<Ephemeron>& current_ephemerons) {
+ current_ephemerons.Update(EphemeronUpdater);
+}
+
+void WeakObjects::UpdateNextEphemerons(
+ WeakObjectWorklist<Ephemeron>& next_ephemerons) {
+ next_ephemerons.Update(EphemeronUpdater);
+}
+
+void WeakObjects::UpdateDiscoveredEphemerons(
+ WeakObjectWorklist<Ephemeron>& discovered_ephemerons) {
+ discovered_ephemerons.Update(EphemeronUpdater);
+}
+
+void WeakObjects::UpdateWeakReferences(
+ WeakObjectWorklist<HeapObjectAndSlot>& weak_references) {
+ weak_references.Update(
+ [](HeapObjectAndSlot slot_in, HeapObjectAndSlot* slot_out) -> bool {
+ HeapObject heap_obj = slot_in.first;
+ HeapObject forwarded = ForwardingAddress(heap_obj);
+
+ if (!forwarded.is_null()) {
+ ptrdiff_t distance_to_slot =
+ slot_in.second.address() - slot_in.first.ptr();
+ Address new_slot = forwarded.ptr() + distance_to_slot;
+ slot_out->first = forwarded;
+ slot_out->second = HeapObjectSlot(new_slot);
+ return true;
+ }
+
+ return false;
+ });
+}
+
+void WeakObjects::UpdateWeakObjectsInCode(
+ WeakObjectWorklist<HeapObjectAndCode>& weak_objects_in_code) {
+ weak_objects_in_code.Update(
+ [](HeapObjectAndCode slot_in, HeapObjectAndCode* slot_out) -> bool {
+ HeapObject heap_obj = slot_in.first;
+ HeapObject forwarded = ForwardingAddress(heap_obj);
+
+ if (!forwarded.is_null()) {
+ slot_out->first = forwarded;
+ slot_out->second = slot_in.second;
+ return true;
+ }
+
+ return false;
+ });
+}
+
+void WeakObjects::UpdateJSWeakRefs(
+ WeakObjectWorklist<JSWeakRef>& js_weak_refs) {
+ if (FLAG_harmony_weak_refs) {
+ js_weak_refs.Update(
+ [](JSWeakRef js_weak_ref_in, JSWeakRef* js_weak_ref_out) -> bool {
+ JSWeakRef forwarded = ForwardingAddress(js_weak_ref_in);
+
+ if (!forwarded.is_null()) {
+ *js_weak_ref_out = forwarded;
+ return true;
+ }
+
+ return false;
+ });
+ }
+}
+
+void WeakObjects::UpdateWeakCells(WeakObjectWorklist<WeakCell>& weak_cells) {
+ // TODO(syg, marja): Support WeakCells in the young generation.
+ DCHECK(!ContainsYoungObjects(weak_cells));
+}
+
+void WeakObjects::UpdateBytecodeFlushingCandidates(
+ WeakObjectWorklist<SharedFunctionInfo>& bytecode_flushing_candidates) {
+ DCHECK(!ContainsYoungObjects(bytecode_flushing_candidates));
+}
+
+void WeakObjects::UpdateFlushedJSFunctions(
+ WeakObjectWorklist<JSFunction>& flushed_js_functions) {
+ flushed_js_functions.Update(
+ [](JSFunction slot_in, JSFunction* slot_out) -> bool {
+ JSFunction forwarded = ForwardingAddress(slot_in);
+
+ if (!forwarded.is_null()) {
+ *slot_out = forwarded;
+ return true;
+ }
+
+ return false;
+ });
+}
+
+#ifdef DEBUG
+template <typename Type>
+bool WeakObjects::ContainsYoungObjects(WeakObjectWorklist<Type>& worklist) {
+ bool result = false;
+ worklist.Iterate([&result](Type candidate) {
+ if (Heap::InYoungGeneration(candidate)) {
+ result = true;
+ }
+ });
+ return result;
+}
+#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/weak-object-worklists.h b/deps/v8/src/heap/weak-object-worklists.h
new file mode 100644
index 0000000000..67df372b57
--- /dev/null
+++ b/deps/v8/src/heap/weak-object-worklists.h
@@ -0,0 +1,90 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_WEAK_OBJECT_WORKLISTS_H_
+#define V8_HEAP_WEAK_OBJECT_WORKLISTS_H_
+
+#include "src/common/globals.h"
+#include "src/heap/worklist.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/js-weak-refs.h"
+
+namespace v8 {
+namespace internal {
+
+struct Ephemeron {
+ HeapObject key;
+ HeapObject value;
+};
+
+using HeapObjectAndSlot = std::pair<HeapObject, HeapObjectSlot>;
+using HeapObjectAndCode = std::pair<HeapObject, Code>;
+class EphemeronHashTable;
+class JSFunction;
+class SharedFunctionInfo;
+class TransitionArray;
+
+// Weak objects and weak references discovered during incremental/concurrent
+// marking. They are processed in ClearNonLiveReferences after marking.
+// Each entry in this list specifies:
+// 1) Type of the worklist entry.
+// 2) Lower-case name of the worklist.
+// 3) Capitalized name of the worklist.
+//
+// If you add a new entry, then you also need to implement the corresponding
+// Update*() function in the cc file for updating pointers after Scavenge.
+#define WEAK_OBJECT_WORKLISTS(F) \
+ F(TransitionArray, transition_arrays, TransitionArrays) \
+ /* Keep track of all EphemeronHashTables in the heap to process \
+ them in the atomic pause. */ \
+ F(EphemeronHashTable, ephemeron_hash_tables, EphemeronHashTables) \
+ /* Keep track of all ephemerons for concurrent marking tasks. Only store \
+ ephemerons in these worklists if both (key, value) are unreachable at \
+ the moment. \
+ MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains/fills \
+ these worklists. current_ephemerons is used as draining worklist in \
+ the current fixpoint iteration. */ \
+ F(Ephemeron, current_ephemerons, CurrentEphemerons) \
+ /* Stores ephemerons to visit in the next fixpoint iteration. */ \
+ F(Ephemeron, next_ephemerons, NextEphemerons) \
+ /* When draining the marking worklist new discovered ephemerons are pushed \
+ into this worklist. */ \
+ F(Ephemeron, discovered_ephemerons, DiscoveredEphemerons) \
+ /* TODO(marja): For old space, we only need the slot, not the host object. \
+ Optimize this by adding a different storage for old space. */ \
+ F(HeapObjectAndSlot, weak_references, WeakReferences) \
+ F(HeapObjectAndCode, weak_objects_in_code, WeakObjectsInCode) \
+ F(JSWeakRef, js_weak_refs, JSWeakRefs) \
+ F(WeakCell, weak_cells, WeakCells) \
+ F(SharedFunctionInfo, bytecode_flushing_candidates, \
+ BytecodeFlushingCandidates) \
+ F(JSFunction, flushed_js_functions, FlushedJSFunctions)
+
+class WeakObjects {
+ public:
+ template <typename Type>
+ using WeakObjectWorklist = Worklist<Type, 64>;
+
+#define DECLARE_WORKLIST(Type, name, _) WeakObjectWorklist<Type> name;
+ WEAK_OBJECT_WORKLISTS(DECLARE_WORKLIST)
+#undef DECLARE_WORKLIST
+
+ void UpdateAfterScavenge();
+
+ private:
+#define DECLARE_UPDATE_METHODS(Type, _, Name) \
+ void Update##Name(WeakObjectWorklist<Type>&);
+ WEAK_OBJECT_WORKLISTS(DECLARE_UPDATE_METHODS)
+#undef DECLARE_UPDATE_METHODS
+
+#ifdef DEBUG
+ template <typename Type>
+ bool ContainsYoungObjects(WeakObjectWorklist<Type>& worklist);
+#endif
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_WEAK_OBJECT_WORKLISTS_H_
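Note on the pattern used in the header above: WEAK_OBJECT_WORKLISTS is an X-macro, so every F(Type, name, Name) entry is re-expanded at each use site. DECLARE_WORKLIST emits one worklist member per entry, and INVOKE_UPDATE in the .cc file calls the matching Update*() method. The following standalone sketch shows the same idiom with made-up types and names; it is only an illustration of the expansion, not V8 code.

    #include <iostream>

    // Hypothetical entry list in the same F(Type, lower_case_name, CamelName)
    // shape as WEAK_OBJECT_WORKLISTS.
    #define EXAMPLE_WORKLISTS(F)                    \
      F(int, transition_arrays, TransitionArrays)   \
      F(double, weak_cells, WeakCells)

    struct ExampleHolder {
      // Expands to: int transition_arrays; double weak_cells;
    #define DECLARE_FIELD(Type, name, _) Type name;
      EXAMPLE_WORKLISTS(DECLARE_FIELD)
    #undef DECLARE_FIELD

      // Expands to: UpdateTransitionArrays(transition_arrays);
      //             UpdateWeakCells(weak_cells);
      void UpdateAll() {
    #define INVOKE_UPDATE(_, name, Name) Update##Name(name);
        EXAMPLE_WORKLISTS(INVOKE_UPDATE)
    #undef INVOKE_UPDATE
      }

      void UpdateTransitionArrays(int& v) {
        std::cout << "transition_arrays " << v << "\n";
      }
      void UpdateWeakCells(double& v) { std::cout << "weak_cells " << v << "\n"; }
    };

    int main() {
      ExampleHolder holder{1, 2.5};
      holder.UpdateAll();  // prints "transition_arrays 1" then "weak_cells 2.5"
    }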
diff --git a/deps/v8/src/ic/DIR_METADATA b/deps/v8/src/ic/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/ic/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/ic/OWNERS b/deps/v8/src/ic/OWNERS
index 816ddb52c5..5bf39a2df1 100644
--- a/deps/v8/src/ic/OWNERS
+++ b/deps/v8/src/ic/OWNERS
@@ -4,5 +4,3 @@ jkummerow@chromium.org
mvstanton@chromium.org
verwaest@chromium.org
mythria@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 40728edf90..c9e517dccb 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -211,8 +211,7 @@ void AccessorAssembler::HandleLoadAccessor(
TNode<Foreign> foreign = LoadObjectField<Foreign>(
call_handler_info, CallHandlerInfo::kJsCallbackOffset);
- TNode<RawPtrT> callback =
- DecodeExternalPointer(LoadForeignForeignAddress(foreign));
+ TNode<RawPtrT> callback = LoadForeignForeignAddressPtr(foreign);
TNode<Object> data =
LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
@@ -1669,8 +1668,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
TNode<Foreign> foreign = LoadObjectField<Foreign>(
call_handler_info, CallHandlerInfo::kJsCallbackOffset);
- TNode<RawPtrT> callback =
- DecodeExternalPointer(LoadForeignForeignAddress(foreign));
+ TNode<RawPtrT> callback = LoadForeignForeignAddressPtr(foreign);
TNode<Object> data =
LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
@@ -2499,9 +2497,9 @@ void AccessorAssembler::GenericPropertyLoad(
var_holder_map = proto_map;
var_holder_instance_type = proto_instance_type;
Label next_proto(this), return_value(this, &var_value), goto_slow(this);
- TryGetOwnProperty(p->context(), CAST(p->receiver()), CAST(proto),
- proto_map, proto_instance_type, name, &return_value,
- &var_value, &next_proto, &goto_slow);
+ TryGetOwnProperty(p->context(), p->receiver(), CAST(proto), proto_map,
+ proto_instance_type, name, &return_value, &var_value,
+ &next_proto, &goto_slow);
// This trampoline and the next are required to appease Turbofan's
// variable merging.
@@ -3731,11 +3729,11 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
void AccessorAssembler::GenerateLoadIC() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
LoadIC(&p);
@@ -3744,11 +3742,11 @@ void AccessorAssembler::GenerateLoadIC() {
void AccessorAssembler::GenerateLoadIC_Megamorphic() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
ExitPoint direct_exit(this);
TVARIABLE(MaybeObject, var_handler);
@@ -3778,11 +3776,11 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
void AccessorAssembler::GenerateLoadIC_Noninlined() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<FeedbackVector>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
ExitPoint direct_exit(this);
TVARIABLE(MaybeObject, var_handler);
@@ -3811,10 +3809,10 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() {
void AccessorAssembler::GenerateLoadIC_NoFeedback() {
using Descriptor = LoadNoFeedbackDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Smi> ic_kind = CAST(Parameter(Descriptor::kICKind));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto ic_kind = Parameter<Smi>(Descriptor::kICKind);
LoadICParameters p(context, receiver, name,
TaggedIndexConstant(FeedbackSlot::Invalid().ToInt()),
@@ -3825,10 +3823,10 @@ void AccessorAssembler::GenerateLoadIC_NoFeedback() {
void AccessorAssembler::GenerateLoadICTrampoline() {
using Descriptor = LoadDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
@@ -3837,10 +3835,10 @@ void AccessorAssembler::GenerateLoadICTrampoline() {
void AccessorAssembler::GenerateLoadICTrampoline_Megamorphic() {
using Descriptor = LoadDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kLoadIC_Megamorphic, context, receiver, name, slot,
@@ -3850,13 +3848,12 @@ void AccessorAssembler::GenerateLoadICTrampoline_Megamorphic() {
void AccessorAssembler::GenerateLoadSuperIC() {
using Descriptor = LoadWithReceiverAndVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> lookup_start_object =
- CAST(Parameter(Descriptor::kLookupStartObject));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto lookup_start_object = Parameter<Object>(Descriptor::kLookupStartObject);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector,
lookup_start_object);
@@ -3866,9 +3863,9 @@ void AccessorAssembler::GenerateLoadSuperIC() {
void AccessorAssembler::GenerateLoadGlobalIC_NoFeedback() {
using Descriptor = LoadGlobalNoFeedbackDescriptor;
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Smi> ic_kind = CAST(Parameter(Descriptor::kICKind));
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto ic_kind = Parameter<Smi>(Descriptor::kICKind);
LoadGlobalIC_NoFeedback(context, name, ic_kind);
}
@@ -3876,10 +3873,10 @@ void AccessorAssembler::GenerateLoadGlobalIC_NoFeedback() {
void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
using Descriptor = LoadGlobalWithVectorDescriptor;
- TNode<Name> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto name = Parameter<Name>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
ExitPoint direct_exit(this);
LoadGlobalIC(
@@ -3895,9 +3892,9 @@ void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
using Descriptor = LoadGlobalDescriptor;
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
Callable callable =
@@ -3908,11 +3905,11 @@ void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
void AccessorAssembler::GenerateKeyedLoadIC() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadIC(&p, LoadAccessMode::kLoad);
@@ -3921,11 +3918,11 @@ void AccessorAssembler::GenerateKeyedLoadIC() {
void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadICGeneric(&p);
@@ -3934,10 +3931,10 @@ void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
using Descriptor = LoadDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kKeyedLoadIC, context, receiver, name, slot,
@@ -3947,10 +3944,10 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
void AccessorAssembler::GenerateKeyedLoadICTrampoline_Megamorphic() {
using Descriptor = LoadDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kKeyedLoadIC_Megamorphic, context, receiver, name,
@@ -3960,11 +3957,11 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline_Megamorphic() {
void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<FeedbackVector>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadICPolymorphicName(&p, LoadAccessMode::kLoad);
@@ -3973,11 +3970,11 @@ void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
void AccessorAssembler::GenerateStoreGlobalIC() {
using Descriptor = StoreGlobalWithVectorDescriptor;
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
StoreICParameters p(context, base::nullopt, name, value, slot, vector);
StoreGlobalIC(&p);
@@ -3986,10 +3983,10 @@ void AccessorAssembler::GenerateStoreGlobalIC() {
void AccessorAssembler::GenerateStoreGlobalICTrampoline() {
using Descriptor = StoreGlobalDescriptor;
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kStoreGlobalIC, context, name, value, slot, vector);
@@ -3998,12 +3995,12 @@ void AccessorAssembler::GenerateStoreGlobalICTrampoline() {
void AccessorAssembler::GenerateStoreIC() {
using Descriptor = StoreWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
StoreICParameters p(context, receiver, name, value, slot, vector);
StoreIC(&p);
@@ -4012,11 +4009,11 @@ void AccessorAssembler::GenerateStoreIC() {
void AccessorAssembler::GenerateStoreICTrampoline() {
using Descriptor = StoreDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kStoreIC, context, receiver, name, value, slot,
@@ -4026,12 +4023,12 @@ void AccessorAssembler::GenerateStoreICTrampoline() {
void AccessorAssembler::GenerateKeyedStoreIC() {
using Descriptor = StoreWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
StoreICParameters p(context, receiver, name, value, slot, vector);
KeyedStoreIC(&p);
@@ -4040,11 +4037,11 @@ void AccessorAssembler::GenerateKeyedStoreIC() {
void AccessorAssembler::GenerateKeyedStoreICTrampoline() {
using Descriptor = StoreDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kKeyedStoreIC, context, receiver, name, value, slot,
@@ -4054,12 +4051,12 @@ void AccessorAssembler::GenerateKeyedStoreICTrampoline() {
void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
using Descriptor = StoreWithVectorDescriptor;
- TNode<Object> array = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> index = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto array = Parameter<Object>(Descriptor::kReceiver);
+ auto index = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
StoreICParameters p(context, array, index, value, slot, vector);
StoreInArrayLiteralIC(&p);
@@ -4067,9 +4064,9 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
void AccessorAssembler::GenerateCloneObjectIC_Slow() {
using Descriptor = CloneObjectWithVectorDescriptor;
- TNode<Object> source = CAST(Parameter(Descriptor::kSource));
- TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto source = Parameter<Object>(Descriptor::kSource);
+ auto flags = Parameter<Smi>(Descriptor::kFlags);
+ auto context = Parameter<Context>(Descriptor::kContext);
// The Slow case uses the same call interface as CloneObjectIC, so that it
// can be tail called from it. However, the feedback slot and vector are not
@@ -4120,11 +4117,11 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
void AccessorAssembler::GenerateCloneObjectIC() {
using Descriptor = CloneObjectWithVectorDescriptor;
- TNode<Object> source = CAST(Parameter(Descriptor::kSource));
- TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> maybe_vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto source = Parameter<Object>(Descriptor::kSource);
+ auto flags = Parameter<Smi>(Descriptor::kFlags);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto maybe_vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
TVARIABLE(MaybeObject, var_handler);
Label if_handler(this, &var_handler), miss(this, Label::kDeferred),
try_polymorphic(this, Label::kDeferred),
@@ -4269,11 +4266,11 @@ void AccessorAssembler::GenerateCloneObjectIC() {
void AccessorAssembler::GenerateKeyedHasIC() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadIC(&p, LoadAccessMode::kHas);
@@ -4282,9 +4279,9 @@ void AccessorAssembler::GenerateKeyedHasIC() {
void AccessorAssembler::GenerateKeyedHasIC_Megamorphic() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto context = Parameter<Context>(Descriptor::kContext);
// TODO(magardn): implement HasProperty handling in KeyedLoadICGeneric
Return(HasProperty(context, receiver, name,
HasPropertyLookupMode::kHasProperty));
@@ -4293,11 +4290,11 @@ void AccessorAssembler::GenerateKeyedHasIC_Megamorphic() {
void AccessorAssembler::GenerateKeyedHasIC_PolymorphicName() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadICPolymorphicName(&p, LoadAccessMode::kHas);
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 54795d4202..72f43743d2 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -97,9 +97,11 @@ bool CallOptimization::IsCompatibleReceiverMap(Handle<Map> map,
void CallOptimization::Initialize(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
- if (function_template_info->call_code().IsUndefined(isolate)) return;
+ if (function_template_info->call_code(kAcquireLoad).IsUndefined(isolate))
+ return;
api_call_info_ = handle(
- CallHandlerInfo::cast(function_template_info->call_code()), isolate);
+ CallHandlerInfo::cast(function_template_info->call_code(kAcquireLoad)),
+ isolate);
if (!function_template_info->signature().IsUndefined(isolate)) {
expected_receiver_type_ =
@@ -124,8 +126,9 @@ void CallOptimization::AnalyzePossibleApiFunction(Isolate* isolate,
isolate);
// Require a C++ callback.
- if (info->call_code().IsUndefined(isolate)) return;
- api_call_info_ = handle(CallHandlerInfo::cast(info->call_code()), isolate);
+ HeapObject call_code = info->call_code(kAcquireLoad);
+ if (call_code.IsUndefined(isolate)) return;
+ api_call_info_ = handle(CallHandlerInfo::cast(call_code), isolate);
if (!info->signature().IsUndefined(isolate)) {
expected_receiver_type_ =
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index 73cd228001..8418962172 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -225,8 +225,8 @@ MaybeObjectHandle StoreHandler::StoreTransition(Isolate* isolate,
#ifdef DEBUG
if (!is_dictionary_map) {
InternalIndex descriptor = transition_map->LastAdded();
- Handle<DescriptorArray> descriptors(transition_map->instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> descriptors(
+ transition_map->instance_descriptors(kRelaxedLoad), isolate);
PropertyDetails details = descriptors->GetDetails(descriptor);
if (descriptors->GetKey(descriptor).IsPrivate()) {
DCHECK_EQ(DONT_ENUM, details.attributes());
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 35218f7df5..4c1de81ae3 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -37,8 +37,7 @@ bool IC::IsHandler(MaybeObject object) {
bool IC::vector_needs_update() {
if (state() == NO_FEEDBACK) return false;
return (!vector_set_ &&
- (state() != MEGAMORPHIC ||
- nexus()->GetFeedbackExtra().ToSmi().value() != ELEMENT));
+ (state() != MEGAMORPHIC || nexus()->GetKeyType() != ELEMENT));
}
} // namespace internal
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index d8e25f3c74..b077d4eabf 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -620,17 +620,9 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name,
DCHECK_LE(i, maps_and_handlers.size());
}
- // Reorder the deprecated maps to be at the end, so that
- // minimorphic ICs have the best chance of succeeding as they only
- // check the first FLAG_max_minimorphic_map_checks maps.
- if (deprecated_maps_and_handlers.size() > 0) {
- maps_and_handlers.insert(maps_and_handlers.end(),
- deprecated_maps_and_handlers.begin(),
- deprecated_maps_and_handlers.end());
- }
-
- int number_of_maps = static_cast<int>(maps_and_handlers.size());
int deprecated_maps = static_cast<int>(deprecated_maps_and_handlers.size());
+ int number_of_maps =
+ static_cast<int>(maps_and_handlers.size()) + deprecated_maps;
int number_of_valid_maps =
number_of_maps - deprecated_maps - (handler_to_overwrite != -1);
@@ -655,6 +647,15 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name,
maps_and_handlers.push_back(MapAndHandler(map, handler));
}
+ // Reorder the deprecated maps to be at the end, so that
+ // minimorphic ICs have the best chance of succeeding as they only
+ // check the first FLAG_max_minimorphic_map_checks maps.
+ if (deprecated_maps_and_handlers.size() > 0) {
+ maps_and_handlers.insert(maps_and_handlers.end(),
+ deprecated_maps_and_handlers.begin(),
+ deprecated_maps_and_handlers.end());
+ }
+
ConfigureVectorState(name, maps_and_handlers);
}
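The reordering above matters because minimorphic ICs only inspect the first FLAG_max_minimorphic_map_checks entries: the newly computed (map, handler) pair must land before the deprecated tail, so the deprecated maps are now appended only after the push_back. A simplified, self-contained sketch of the resulting ordering (plain std::vector with illustrative strings, not V8's MapAndHandler type):

    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> maps_and_handlers = {"mapA", "mapB"};  // still valid
      std::vector<std::string> deprecated = {"deprecated1"};

      // New order of operations: push the fresh entry first, then append the
      // deprecated maps, so deprecated maps always sit behind everything a
      // minimorphic IC will actually check.
      maps_and_handlers.push_back("newMap");
      maps_and_handlers.insert(maps_and_handlers.end(), deprecated.begin(),
                               deprecated.end());

      assert(maps_and_handlers[2] == "newMap");
      assert(maps_and_handlers.back() == "deprecated1");
    }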
@@ -1862,7 +1863,9 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode,
Handle<Map> new_receiver_map) {
std::vector<MapAndHandler> target_maps_and_handlers;
- nexus()->ExtractMapsAndHandlers(&target_maps_and_handlers, true);
+ nexus()->ExtractMapsAndHandlers(
+ &target_maps_and_handlers,
+ [this](Handle<Map> map) { return Map::TryUpdate(isolate(), map); });
if (target_maps_and_handlers.empty()) {
Handle<Map> monomorphic_map = receiver_map;
// If we transitioned to a map that is a more general map than incoming
@@ -2711,7 +2714,7 @@ static bool CanFastCloneObject(Handle<Map> map) {
return false;
}
- DescriptorArray descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors(kRelaxedLoad);
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
Name key = descriptors.GetKey(i);
@@ -2760,8 +2763,8 @@ static Handle<Map> FastCloneObjectMap(Isolate* isolate, Handle<Map> source_map,
map = Map::Copy(isolate, map, "InitializeClonedDescriptors");
}
- Handle<DescriptorArray> source_descriptors(source_map->instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> source_descriptors(
+ source_map->instance_descriptors(kRelaxedLoad), isolate);
int size = source_map->NumberOfOwnDescriptors();
int slack = 0;
Handle<DescriptorArray> descriptors = DescriptorArray::CopyForFastObjectClone(
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 7604e8d8f4..3a0c9076cd 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -1031,10 +1031,10 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
void KeyedStoreGenericAssembler::KeyedStoreGeneric() {
using Descriptor = StoreDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
KeyedStoreGeneric(context, receiver, name, value, Nothing<LanguageMode>());
}
@@ -1050,11 +1050,11 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
void KeyedStoreGenericAssembler::StoreIC_NoFeedback() {
using Descriptor = StoreDescriptor;
- TNode<Object> receiver_maybe_smi = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver_maybe_smi = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label miss(this, Label::kDeferred), store_property(this);
diff --git a/deps/v8/src/init/DIR_METADATA b/deps/v8/src/init/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/init/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/init/OWNERS b/deps/v8/src/init/OWNERS
index 933637e2e9..8e374f760c 100644
--- a/deps/v8/src/init/OWNERS
+++ b/deps/v8/src/init/OWNERS
@@ -9,5 +9,3 @@ marja@chromium.org
mathias@chromium.org
ulan@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index 990a7804fd..b1a3361919 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -225,6 +225,7 @@ class Genesis {
HARMONY_STAGED(DECLARE_FEATURE_INITIALIZATION)
HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
#undef DECLARE_FEATURE_INITIALIZATION
+ void InitializeGlobal_regexp_linear_flag();
enum ArrayBufferKind {
ARRAY_BUFFER,
@@ -365,6 +366,7 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
if (FLAG_track_detached_contexts) {
isolate_->AddDetachedContext(env);
}
+ DCHECK(global_proxy->IsDetached());
env->native_context().set_microtask_queue(isolate_, nullptr);
}
@@ -1107,7 +1109,7 @@ namespace {
void ReplaceAccessors(Isolate* isolate, Handle<Map> map, Handle<String> name,
PropertyAttributes attributes,
Handle<AccessorPair> accessor_pair) {
- DescriptorArray descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors(kRelaxedLoad);
InternalIndex entry = descriptors.SearchWithCache(isolate, *name, *map);
Descriptor d = Descriptor::AccessorConstant(name, accessor_pair, attributes);
descriptors.Replace(entry, &d);
@@ -1569,8 +1571,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kFastFunctionPrototypeBind, 1, false);
SimpleInstallFunction(isolate_, prototype, "call",
Builtins::kFunctionPrototypeCall, 1, false);
- SimpleInstallFunction(isolate_, prototype, "toString",
- Builtins::kFunctionPrototypeToString, 0, false);
+ Handle<JSFunction> function_to_string =
+ SimpleInstallFunction(isolate_, prototype, "toString",
+ Builtins::kFunctionPrototypeToString, 0, false);
+ native_context()->set_function_to_string(*function_to_string);
// Install the @@hasInstance function.
Handle<JSFunction> has_instance = InstallFunctionAtSymbol(
@@ -2299,6 +2303,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, promise_fun, "all", Builtins::kPromiseAll, 1, true);
native_context()->set_promise_all(*promise_all);
+ InstallFunctionWithBuiltinId(isolate_, promise_fun, "allSettled",
+ Builtins::kPromiseAllSettled, 1, true);
+
InstallFunctionWithBuiltinId(isolate_, promise_fun, "race",
Builtins::kPromiseRace, 1, true);
@@ -3848,6 +3855,7 @@ void Genesis::InitializeExperimentalGlobal() {
HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
HARMONY_INPROGRESS(FEATURE_INITIALIZE_GLOBAL)
#undef FEATURE_INITIALIZE_GLOBAL
+ InitializeGlobal_regexp_linear_flag();
}
bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
@@ -4090,10 +4098,6 @@ void Genesis::InitializeCallSiteBuiltins() {
FunctionInfo infos[] = {
{"getColumnNumber", Builtins::kCallSitePrototypeGetColumnNumber},
- {"getEnclosingColumnNumber",
- Builtins::kCallSitePrototypeGetEnclosingColumnNumber},
- {"getEnclosingLineNumber",
- Builtins::kCallSitePrototypeGetEnclosingLineNumber},
{"getEvalOrigin", Builtins::kCallSitePrototypeGetEvalOrigin},
{"getFileName", Builtins::kCallSitePrototypeGetFileName},
{"getFunction", Builtins::kCallSitePrototypeGetFunction},
@@ -4127,13 +4131,11 @@ void Genesis::InitializeCallSiteBuiltins() {
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_namespace_exports)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_methods)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_top_level_await)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_logical_assignment)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_assertions)
#ifdef V8_INTL_SUPPORT
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_displaynames_date_types)
@@ -4277,12 +4279,6 @@ void Genesis::InitializeGlobal_harmony_promise_any() {
native_context()->set_promise_any(*promise_any);
}
-void Genesis::InitializeGlobal_harmony_promise_all_settled() {
- if (!FLAG_harmony_promise_all_settled) return;
- SimpleInstallFunction(isolate(), isolate()->promise_function(), "allSettled",
- Builtins::kPromiseAllSettled, 1, true);
-}
-
void Genesis::InitializeGlobal_harmony_regexp_match_indices() {
if (!FLAG_harmony_regexp_match_indices) return;
@@ -4306,6 +4302,20 @@ void Genesis::InitializeGlobal_harmony_string_replaceall() {
Builtins::kStringPrototypeReplaceAll, 2, true);
}
+void Genesis::InitializeGlobal_regexp_linear_flag() {
+ if (!FLAG_enable_experimental_regexp_engine) return;
+
+ Handle<JSFunction> regexp_fun(native_context()->regexp_function(), isolate());
+ Handle<JSObject> regexp_prototype(
+ JSObject::cast(regexp_fun->instance_prototype()), isolate());
+ SimpleInstallGetter(isolate(), regexp_prototype,
+ isolate()->factory()->linear_string(),
+ Builtins::kRegExpPrototypeLinearGetter, true);
+
+ // Store regexp prototype map again after change.
+ native_context()->set_regexp_prototype_map(regexp_prototype->map());
+}
+
#ifdef V8_INTL_SUPPORT
void Genesis::InitializeGlobal_harmony_intl_segmenter() {
@@ -5032,8 +5042,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// The global template must not create properties that already exist
// in the snapshotted global object.
if (from->HasFastProperties()) {
- Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(from->map().instance_descriptors(), isolate());
+ Handle<DescriptorArray> descs = Handle<DescriptorArray>(
+ from->map().instance_descriptors(kRelaxedLoad), isolate());
for (InternalIndex i : from->map().IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
if (details.location() == kField) {
@@ -5162,7 +5172,8 @@ Handle<Map> Genesis::CreateInitialMapForArraySubclass(int size,
{
JSFunction array_function = native_context()->array_function();
Handle<DescriptorArray> array_descriptors(
- array_function.initial_map().instance_descriptors(), isolate());
+ array_function.initial_map().instance_descriptors(kRelaxedLoad),
+ isolate());
Handle<String> length = factory()->length_string();
InternalIndex old = array_descriptors->SearchWithCache(
isolate(), *length, array_function.initial_map());
@@ -5278,6 +5289,14 @@ Genesis::Genesis(
}
}
+ // TODO(v8:10391): The reason is that the NativeContext::microtask_queue
+ // serialization is not actually supported, and therefore the field is
+ // serialized as raw data instead of being serialized as ExternalReference.
+ // As a result, when V8 heap sandbox is enabled, the external pointer entry
+ // is not allocated for microtask queue field during deserialization, so we
+ // allocate it manually here.
+ native_context()->AllocateExternalPointerEntries(isolate);
+
native_context()->set_microtask_queue(
isolate, microtask_queue ? static_cast<MicrotaskQueue*>(microtask_queue)
: isolate->default_microtask_queue());
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index 1188411efc..eaf441d6e8 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -133,6 +133,7 @@
V(_, ArrayBuffer_string, "ArrayBuffer") \
V(_, ArrayIterator_string, "Array Iterator") \
V(_, as_string, "as") \
+ V(_, assert_string, "assert") \
V(_, async_string, "async") \
V(_, auto_string, "auto") \
V(_, await_string, "await") \
@@ -225,6 +226,7 @@
V(_, length_string, "length") \
V(_, let_string, "let") \
V(_, line_string, "line") \
+ V(_, linear_string, "linear") \
V(_, LinkError_string, "LinkError") \
V(_, long_string, "long") \
V(_, Map_string, "Map") \
@@ -344,7 +346,6 @@
V(_, error_script_symbol) \
V(_, error_start_pos_symbol) \
V(_, frozen_symbol) \
- V(_, generic_symbol) \
V(_, home_object_symbol) \
V(_, interpreter_trampoline_symbol) \
V(_, megamorphic_symbol) \
@@ -495,7 +496,6 @@
F(SCAVENGER_FAST_PROMOTE) \
F(SCAVENGER_FREE_REMEMBERED_SET) \
F(SCAVENGER_SCAVENGE) \
- F(SCAVENGER_PROCESS_ARRAY_BUFFERS) \
F(SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY) \
F(SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS) \
F(SCAVENGER_SCAVENGE_PARALLEL) \
@@ -510,6 +510,7 @@
#define TRACER_BACKGROUND_SCOPES(F) \
F(BACKGROUND_ARRAY_BUFFER_FREE) \
F(BACKGROUND_ARRAY_BUFFER_SWEEP) \
+ F(BACKGROUND_COLLECTION) \
F(BACKGROUND_STORE_BUFFER) \
F(BACKGROUND_UNMAPPER) \
F(MC_BACKGROUND_EVACUATE_COPY) \
diff --git a/deps/v8/src/init/isolate-allocator.cc b/deps/v8/src/init/isolate-allocator.cc
index b9ec6c3f43..01ae416181 100644
--- a/deps/v8/src/init/isolate-allocator.cc
+++ b/deps/v8/src/init/isolate-allocator.cc
@@ -12,20 +12,16 @@
namespace v8 {
namespace internal {
-IsolateAllocator::IsolateAllocator(IsolateAllocationMode mode) {
-#if V8_TARGET_ARCH_64_BIT
- if (mode == IsolateAllocationMode::kInV8Heap) {
- Address heap_reservation_address = InitReservation();
- CommitPagesForIsolate(heap_reservation_address);
- return;
- }
-#endif // V8_TARGET_ARCH_64_BIT
-
+IsolateAllocator::IsolateAllocator() {
+#ifdef V8_COMPRESS_POINTERS
+ Address heap_reservation_address = InitReservation();
+ CommitPagesForIsolate(heap_reservation_address);
+#else
// Allocate Isolate in C++ heap.
- CHECK_EQ(mode, IsolateAllocationMode::kInCppHeap);
page_allocator_ = GetPlatformPageAllocator();
isolate_memory_ = ::operator new(sizeof(Isolate));
DCHECK(!reservation_.IsReserved());
+#endif // V8_COMPRESS_POINTERS
}
IsolateAllocator::~IsolateAllocator() {
@@ -38,7 +34,7 @@ IsolateAllocator::~IsolateAllocator() {
::operator delete(isolate_memory_);
}
-#if V8_TARGET_ARCH_64_BIT
+#ifdef V8_COMPRESS_POINTERS
namespace {
@@ -192,7 +188,7 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
}
isolate_memory_ = reinterpret_cast<void*>(isolate_address);
}
-#endif // V8_TARGET_ARCH_64_BIT
+#endif // V8_COMPRESS_POINTERS
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/init/isolate-allocator.h b/deps/v8/src/init/isolate-allocator.h
index c176bf32cb..8d843702cc 100644
--- a/deps/v8/src/init/isolate-allocator.h
+++ b/deps/v8/src/init/isolate-allocator.h
@@ -22,7 +22,8 @@ class BoundedPageAllocator;
namespace internal {
// IsolateAllocator object is responsible for allocating memory for one (!)
-// Isolate object. Depending on the allocation mode the memory can be allocated
+// Isolate object. Depending on whether pointer compression is enabled,
+// the memory can be allocated
// 1) in the C++ heap (when pointer compression is disabled)
// 2) in a proper part of a properly aligned region of a reserved address space
// (when pointer compression is enabled).
@@ -34,18 +35,13 @@ namespace internal {
// Isolate::Delete() takes care of the proper order of the objects destruction.
class V8_EXPORT_PRIVATE IsolateAllocator final {
public:
- explicit IsolateAllocator(IsolateAllocationMode mode);
+ IsolateAllocator();
~IsolateAllocator();
void* isolate_memory() const { return isolate_memory_; }
v8::PageAllocator* page_allocator() const { return page_allocator_; }
- IsolateAllocationMode mode() {
- return reservation_.IsReserved() ? IsolateAllocationMode::kInV8Heap
- : IsolateAllocationMode::kInCppHeap;
- }
-
private:
Address InitReservation();
void CommitPagesForIsolate(Address heap_reservation_address);
diff --git a/deps/v8/src/inspector/DIR_METADATA b/deps/v8/src/inspector/DIR_METADATA
new file mode 100644
index 0000000000..3ba1106a5f
--- /dev/null
+++ b/deps/v8/src/inspector/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Platform>DevTools>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/inspector/OWNERS b/deps/v8/src/inspector/OWNERS
index faa8c326b0..ea8456bbe5 100644
--- a/deps/v8/src/inspector/OWNERS
+++ b/deps/v8/src/inspector/OWNERS
@@ -7,5 +7,3 @@ szuend@chromium.org
yangguo@chromium.org
per-file PRESUBMIT.py=file:../../INFRA_OWNERS
-
-# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index acd9609a9c..c72c531178 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -34,6 +34,8 @@
#include <unordered_set>
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-inspector.h"
+#include "src/debug/debug-interface.h"
#include "src/inspector/custom-preview.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
@@ -46,8 +48,6 @@
#include "src/inspector/v8-value-utils.h"
#include "src/inspector/value-mirror.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
namespace {
@@ -518,19 +518,9 @@ Response InjectedScript::getInternalAndPrivateProperties(
}
void InjectedScript::releaseObject(const String16& objectId) {
- std::vector<uint8_t> cbor;
- v8_crdtp::json::ConvertJSONToCBOR(
- v8_crdtp::span<uint16_t>(objectId.characters16(), objectId.length()),
- &cbor);
- std::unique_ptr<protocol::Value> parsedObjectId =
- protocol::Value::parseBinary(cbor.data(), cbor.size());
- if (!parsedObjectId) return;
- protocol::DictionaryValue* object =
- protocol::DictionaryValue::cast(parsedObjectId.get());
- if (!object) return;
- int boundId = 0;
- if (!object->getInteger("id", &boundId)) return;
- unbindObject(boundId);
+ std::unique_ptr<RemoteObjectId> remoteId;
+ Response response = RemoteObjectId::parse(objectId, &remoteId);
+ if (response.IsSuccess()) unbindObject(remoteId->id());
}
Response InjectedScript::wrapObject(
@@ -722,10 +712,12 @@ Response InjectedScript::resolveCallArgument(
Response response =
RemoteObjectId::parse(callArgument->getObjectId(""), &remoteObjectId);
if (!response.IsSuccess()) return response;
- if (remoteObjectId->contextId() != m_context->contextId())
+ if (remoteObjectId->contextId() != m_context->contextId() ||
+ remoteObjectId->isolateId() != m_context->inspector()->isolateId()) {
return Response::ServerError(
"Argument should belong to the same JavaScript world as target "
"object");
+ }
return findObject(*remoteObjectId, result);
}
if (callArgument->hasValue() || callArgument->hasUnserializableValue()) {
@@ -861,6 +853,7 @@ Response InjectedScript::wrapEvaluateResult(
v8::Local<v8::Object> InjectedScript::commandLineAPI() {
if (m_commandLineAPI.IsEmpty()) {
+ v8::debug::DisableBreakScope disable_break(m_context->isolate());
m_commandLineAPI.Reset(
m_context->isolate(),
m_context->inspector()->console()->createCommandLineAPI(
@@ -1011,10 +1004,8 @@ String16 InjectedScript::bindObject(v8::Local<v8::Value> value,
m_idToObjectGroupName[id] = groupName;
m_nameToObjectGroup[groupName].push_back(id);
}
- // TODO(dgozman): get rid of "injectedScript" notion.
- return String16::concat(
- "{\"injectedScriptId\":", String16::fromInteger(m_context->contextId()),
- ",\"id\":", String16::fromInteger(id), "}");
+ return RemoteObjectId::serialize(m_context->inspector()->isolateId(),
+ m_context->contextId(), id);
}
// static
diff --git a/deps/v8/src/inspector/remote-object-id.cc b/deps/v8/src/inspector/remote-object-id.cc
index e3c67bb6c9..330cdb2b66 100644
--- a/deps/v8/src/inspector/remote-object-id.cc
+++ b/deps/v8/src/inspector/remote-object-id.cc
@@ -10,63 +10,68 @@
namespace v8_inspector {
-RemoteObjectIdBase::RemoteObjectIdBase() : m_injectedScriptId(0) {}
+namespace {
-std::unique_ptr<protocol::DictionaryValue>
-RemoteObjectIdBase::parseInjectedScriptId(const String16& objectId) {
- std::vector<uint8_t> cbor;
- v8_crdtp::json::ConvertJSONToCBOR(
- v8_crdtp::span<uint16_t>(objectId.characters16(), objectId.length()),
- &cbor);
- std::unique_ptr<protocol::Value> parsedValue =
- protocol::Value::parseBinary(cbor.data(), cbor.size());
- if (!parsedValue || parsedValue->type() != protocol::Value::TypeObject)
- return nullptr;
-
- std::unique_ptr<protocol::DictionaryValue> parsedObjectId(
- protocol::DictionaryValue::cast(parsedValue.release()));
- bool success =
- parsedObjectId->getInteger("injectedScriptId", &m_injectedScriptId);
- if (success) return parsedObjectId;
- return nullptr;
+String16 serializeId(uint64_t isolateId, int injectedScriptId, int id) {
+ return String16::concat(
+ String16::fromInteger64(static_cast<int64_t>(isolateId)), ".",
+ String16::fromInteger(injectedScriptId), ".", String16::fromInteger(id));
}
-RemoteObjectId::RemoteObjectId() : RemoteObjectIdBase(), m_id(0) {}
+} // namespace
+
+RemoteObjectIdBase::RemoteObjectIdBase()
+ : m_isolateId(0), m_injectedScriptId(0), m_id(0) {}
+
+bool RemoteObjectIdBase::parseId(const String16& objectId) {
+ const UChar dot = '.';
+ size_t firstDotPos = objectId.find(dot);
+ if (firstDotPos == String16::kNotFound) return false;
+ bool ok = false;
+ int64_t isolateId = objectId.substring(0, firstDotPos).toInteger64(&ok);
+ if (!ok) return false;
+ firstDotPos++;
+ size_t secondDotPos = objectId.find(dot, firstDotPos);
+ if (secondDotPos == String16::kNotFound) return false;
+ int injectedScriptId =
+ objectId.substring(firstDotPos, secondDotPos - firstDotPos)
+ .toInteger(&ok);
+ if (!ok) return false;
+ secondDotPos++;
+ int id = objectId.substring(secondDotPos).toInteger(&ok);
+ if (!ok) return false;
+ m_isolateId = static_cast<uint64_t>(isolateId);
+ m_injectedScriptId = injectedScriptId;
+ m_id = id;
+ return true;
+}
Response RemoteObjectId::parse(const String16& objectId,
std::unique_ptr<RemoteObjectId>* result) {
std::unique_ptr<RemoteObjectId> remoteObjectId(new RemoteObjectId());
- std::unique_ptr<protocol::DictionaryValue> parsedObjectId =
- remoteObjectId->parseInjectedScriptId(objectId);
- if (!parsedObjectId) return Response::ServerError("Invalid remote object id");
-
- bool success = parsedObjectId->getInteger("id", &remoteObjectId->m_id);
- if (!success) return Response::ServerError("Invalid remote object id");
+ if (!remoteObjectId->parseId(objectId))
+ return Response::ServerError("Invalid remote object id");
*result = std::move(remoteObjectId);
return Response::Success();
}
-RemoteCallFrameId::RemoteCallFrameId()
- : RemoteObjectIdBase(), m_frameOrdinal(0) {}
+String16 RemoteObjectId::serialize(uint64_t isolateId, int injectedScriptId,
+ int id) {
+ return serializeId(isolateId, injectedScriptId, id);
+}
Response RemoteCallFrameId::parse(const String16& objectId,
std::unique_ptr<RemoteCallFrameId>* result) {
std::unique_ptr<RemoteCallFrameId> remoteCallFrameId(new RemoteCallFrameId());
- std::unique_ptr<protocol::DictionaryValue> parsedObjectId =
- remoteCallFrameId->parseInjectedScriptId(objectId);
- if (!parsedObjectId) return Response::ServerError("Invalid call frame id");
-
- bool success =
- parsedObjectId->getInteger("ordinal", &remoteCallFrameId->m_frameOrdinal);
- if (!success) return Response::ServerError("Invalid call frame id");
+ if (!remoteCallFrameId->parseId(objectId))
+ return Response::ServerError("Invalid call frame id");
*result = std::move(remoteCallFrameId);
return Response::Success();
}
-String16 RemoteCallFrameId::serialize(int injectedScriptId, int frameOrdinal) {
- return "{\"ordinal\":" + String16::fromInteger(frameOrdinal) +
- ",\"injectedScriptId\":" + String16::fromInteger(injectedScriptId) +
- "}";
+String16 RemoteCallFrameId::serialize(uint64_t isolateId, int injectedScriptId,
+ int frameOrdinal) {
+ return serializeId(isolateId, injectedScriptId, frameOrdinal);
}
} // namespace v8_inspector
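Note on the new wire format introduced above: remote object ids are now plain dot-separated triples handled by serializeId()/parseId(), instead of the previous JSON blob that had to go through a CBOR/JSON parse. A minimal sketch of the round trip, assuming an isolate id of 3 (all concrete values here are illustrative only):

  String16 objectId = RemoteObjectId::serialize(/*isolateId=*/3,
                                                /*injectedScriptId=*/1, /*id=*/42);
  // objectId == "3.1.42"
  std::unique_ptr<RemoteObjectId> parsed;
  Response response = RemoteObjectId::parse(objectId, &parsed);
  // On success: parsed->isolateId() == 3, parsed->contextId() == 1, parsed->id() == 42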
diff --git a/deps/v8/src/inspector/remote-object-id.h b/deps/v8/src/inspector/remote-object-id.h
index 5a35c13e58..1c60124120 100644
--- a/deps/v8/src/inspector/remote-object-id.h
+++ b/deps/v8/src/inspector/remote-object-id.h
@@ -15,16 +15,18 @@ using protocol::Response;
class RemoteObjectIdBase {
public:
+ uint64_t isolateId() const { return m_isolateId; }
int contextId() const { return m_injectedScriptId; }
protected:
RemoteObjectIdBase();
~RemoteObjectIdBase() = default;
- std::unique_ptr<protocol::DictionaryValue> parseInjectedScriptId(
- const String16&);
+ bool parseId(const String16&);
+ uint64_t m_isolateId;
int m_injectedScriptId;
+ int m_id;
};
class RemoteObjectId final : public RemoteObjectIdBase {
@@ -33,10 +35,7 @@ class RemoteObjectId final : public RemoteObjectIdBase {
~RemoteObjectId() = default;
int id() const { return m_id; }
- private:
- RemoteObjectId();
-
- int m_id;
+ static String16 serialize(uint64_t isolateId, int injectedScriptId, int id);
};
class RemoteCallFrameId final : public RemoteObjectIdBase {
@@ -44,14 +43,10 @@ class RemoteCallFrameId final : public RemoteObjectIdBase {
static Response parse(const String16&, std::unique_ptr<RemoteCallFrameId>*);
~RemoteCallFrameId() = default;
- int frameOrdinal() const { return m_frameOrdinal; }
-
- static String16 serialize(int injectedScriptId, int frameOrdinal);
-
- private:
- RemoteCallFrameId();
+ int frameOrdinal() const { return m_id; }
- int m_frameOrdinal;
+ static String16 serialize(uint64_t isolateId, int injectedScriptId,
+ int frameOrdinal);
};
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 4143f6c311..88a7584255 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -39,10 +39,12 @@ class String16 {
static String16 fromInteger(int);
static String16 fromInteger(size_t);
static String16 fromInteger64(int64_t);
+ static String16 fromUInt64(uint64_t);
static String16 fromDouble(double);
static String16 fromDouble(double, int precision);
int64_t toInteger64(bool* ok = nullptr) const;
+ uint64_t toUInt64(bool* ok = nullptr) const;
int toInteger(bool* ok = nullptr) const;
String16 stripWhiteSpace() const;
const UChar* characters16() const { return m_impl.c_str(); }
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 6dda6ef90c..12645cecbc 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -872,7 +872,6 @@ V8Console::CommandLineAPIScope::~CommandLineAPIScope() {
->GetOwnPropertyDescriptor(
m_context, v8::Local<v8::String>::Cast(name))
.ToLocal(&descriptor);
- DCHECK(success);
USE(success);
}
}
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 399fa4c409..f82ce98600 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -1446,8 +1446,8 @@ Response V8DebuggerAgentImpl::currentCallFrames(
int contextId = iterator->GetContextId();
InjectedScript* injectedScript = nullptr;
if (contextId) m_session->findInjectedScript(contextId, injectedScript);
- String16 callFrameId =
- RemoteCallFrameId::serialize(contextId, frameOrdinal);
+ String16 callFrameId = RemoteCallFrameId::serialize(
+ m_inspector->isolateId(), contextId, frameOrdinal);
v8::debug::Location loc = iterator->GetSourceLocation();
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 6549308cc4..f1330dcf12 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -502,6 +502,10 @@ size_t HeapLimitForDebugging(size_t initial_heap_limit) {
size_t V8Debugger::nearHeapLimitCallback(void* data, size_t current_heap_limit,
size_t initial_heap_limit) {
V8Debugger* thisPtr = static_cast<V8Debugger*>(data);
+// TODO(solanes, v8:10876): Remove when bug is solved.
+#if DEBUG
+ printf("nearHeapLimitCallback\n");
+#endif
thisPtr->m_originalHeapLimit = current_heap_limit;
thisPtr->m_scheduledOOMBreak = true;
v8::Local<v8::Context> context =
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index 8db491bf68..4303b35c62 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -239,6 +239,8 @@ Response V8InspectorSessionImpl::findInjectedScript(
Response V8InspectorSessionImpl::findInjectedScript(
RemoteObjectIdBase* objectId, InjectedScript*& injectedScript) {
+ if (objectId->isolateId() != m_inspector->isolateId())
+ return Response::ServerError("Cannot find context with specified id");
return findInjectedScript(objectId->contextId(), injectedScript);
}
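Because ids now carry the isolate id, the session rejects ids minted by a different isolate before attempting any context lookup. A small sketch of how the two pieces compose (the concrete ids are hypothetical):

  std::unique_ptr<RemoteObjectId> remoteId;
  Response r = RemoteObjectId::parse("7.1.5", &remoteId);  // id minted by isolate 7
  // If this inspector reports isolateId() == 3, findInjectedScript(remoteId.get(), ...)
  // now returns "Cannot find context with specified id" instead of resolving
  // context 1 in the wrong isolate.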
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index ac505be5cc..2109348d07 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -56,6 +56,7 @@ static const char customObjectFormatterEnabled[] =
"customObjectFormatterEnabled";
static const char runtimeEnabled[] = "runtimeEnabled";
static const char bindings[] = "bindings";
+static const char globalBindingsKey[] = "";
} // namespace V8RuntimeAgentImplState
using protocol::Runtime::RemoteObject;
@@ -663,32 +664,61 @@ void V8RuntimeAgentImpl::terminateExecution(
m_inspector->debugger()->terminateExecution(std::move(callback));
}
+namespace {
+protocol::DictionaryValue* getOrCreateDictionary(
+ protocol::DictionaryValue* dict, const String16& key) {
+ if (protocol::DictionaryValue* bindings = dict->getObject(key))
+ return bindings;
+ dict->setObject(key, protocol::DictionaryValue::create());
+ return dict->getObject(key);
+}
+} // namespace
+
Response V8RuntimeAgentImpl::addBinding(const String16& name,
- Maybe<int> executionContextId) {
- if (!m_state->getObject(V8RuntimeAgentImplState::bindings)) {
- m_state->setObject(V8RuntimeAgentImplState::bindings,
- protocol::DictionaryValue::create());
- }
- protocol::DictionaryValue* bindings =
- m_state->getObject(V8RuntimeAgentImplState::bindings);
- if (bindings->booleanProperty(name, false)) return Response::Success();
+ Maybe<int> executionContextId,
+ Maybe<String16> executionContextName) {
+ if (m_activeBindings.count(name)) return Response::Success();
if (executionContextId.isJust()) {
+ if (executionContextName.isJust()) {
+ return Response::InvalidParams(
+ "executionContextName is mutually exclusive with executionContextId");
+ }
int contextId = executionContextId.fromJust();
InspectedContext* context =
m_inspector->getContext(m_session->contextGroupId(), contextId);
if (!context) {
- return Response::ServerError(
+ return Response::InvalidParams(
"Cannot find execution context with given executionContextId");
}
addBinding(context, name);
- // false means that we should not add this binding later.
- bindings->setBoolean(name, false);
return Response::Success();
}
- bindings->setBoolean(name, true);
+
+ // If it's a globally exposed binding, i.e. no context name specified, use
+ // a special value for the context name.
+ String16 contextKey = V8RuntimeAgentImplState::globalBindingsKey;
+ if (executionContextName.isJust()) {
+ contextKey = executionContextName.fromJust();
+ if (contextKey == V8RuntimeAgentImplState::globalBindingsKey) {
+ return Response::InvalidParams("Invalid executionContextName");
+ }
+ }
+ // Only persist non context-specific bindings, as contextIds don't make
+ // any sense when state is restored in a different process.
+ protocol::DictionaryValue* bindings =
+ getOrCreateDictionary(m_state, V8RuntimeAgentImplState::bindings);
+ protocol::DictionaryValue* contextBindings =
+ getOrCreateDictionary(bindings, contextKey);
+ contextBindings->setBoolean(name, true);
+
m_inspector->forEachContext(
m_session->contextGroupId(),
- [&name, this](InspectedContext* context) { addBinding(context, name); });
+ [&name, &executionContextName, this](InspectedContext* context) {
+ if (executionContextName.isJust() &&
+ executionContextName.fromJust() != context->humanReadableName())
+ return;
+ addBinding(context, name);
+ });
return Response::Success();
}
@@ -730,34 +760,42 @@ void V8RuntimeAgentImpl::addBinding(InspectedContext* context,
.ToLocal(&functionValue)) {
v8::Maybe<bool> success = global->Set(localContext, v8Name, functionValue);
USE(success);
+ m_activeBindings.insert(name);
}
}
Response V8RuntimeAgentImpl::removeBinding(const String16& name) {
protocol::DictionaryValue* bindings =
m_state->getObject(V8RuntimeAgentImplState::bindings);
- if (!bindings) return Response::Success();
- bindings->remove(name);
+ if (bindings) bindings->remove(name);
+ m_activeBindings.erase(name);
return Response::Success();
}
void V8RuntimeAgentImpl::bindingCalled(const String16& name,
const String16& payload,
int executionContextId) {
- protocol::DictionaryValue* bindings =
- m_state->getObject(V8RuntimeAgentImplState::bindings);
- if (!bindings || !bindings->get(name)) return;
+ if (!m_activeBindings.count(name)) return;
m_frontend.bindingCalled(name, payload, executionContextId);
}
void V8RuntimeAgentImpl::addBindings(InspectedContext* context) {
+ const String16 contextName = context->humanReadableName();
if (!m_enabled) return;
protocol::DictionaryValue* bindings =
m_state->getObject(V8RuntimeAgentImplState::bindings);
if (!bindings) return;
- for (size_t i = 0; i < bindings->size(); ++i) {
- if (!bindings->at(i).second) continue;
- addBinding(context, bindings->at(i).first);
+ protocol::DictionaryValue* globalBindings =
+ bindings->getObject(V8RuntimeAgentImplState::globalBindingsKey);
+ if (globalBindings) {
+ for (size_t i = 0; i < globalBindings->size(); ++i)
+ addBinding(context, globalBindings->at(i).first);
+ }
+ protocol::DictionaryValue* contextBindings =
+ contextName.isEmpty() ? nullptr : bindings->getObject(contextName);
+ if (contextBindings) {
+ for (size_t i = 0; i < contextBindings->size(); ++i)
+ addBinding(context, contextBindings->at(i).first);
}
}
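The persisted agent state now keeps bindings in a nested dictionary keyed by execution-context name, with the empty string reserved for globally exposed bindings; bindings added by executionContextId live only in m_activeBindings and are not persisted across a process restart. A sketch of the resulting state shape for two hypothetical calls:

  // Runtime.addBinding with no context filter, then one filtered by context name:
  agent->addBinding("send", Maybe<int>(), Maybe<String16>());          // global
  agent->addBinding("log", Maybe<int>(), Maybe<String16>("worker"));   // named context
  // m_state under V8RuntimeAgentImplState::bindings would then hold roughly:
  //   { "": { "send": true }, "worker": { "log": true } }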
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index d0491eac5a..80c2096fed 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -32,14 +32,14 @@
#define V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_
#include <memory>
+#include <set>
#include <unordered_map>
+#include "include/v8.h"
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
-#include "include/v8.h"
-
namespace v8_inspector {
class InjectedScript;
@@ -117,8 +117,8 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
void terminateExecution(
std::unique_ptr<TerminateExecutionCallback> callback) override;
- Response addBinding(const String16& name,
- Maybe<int> executionContextId) override;
+ Response addBinding(const String16& name, Maybe<int> executionContextId,
+ Maybe<String16> executionContextName) override;
Response removeBinding(const String16& name) override;
void addBindings(InspectedContext* context);
@@ -145,6 +145,7 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
bool m_enabled;
std::unordered_map<String16, std::unique_ptr<v8::Global<v8::Script>>>
m_compiledScripts;
+ std::set<String16> m_activeBindings;
DISALLOW_COPY_AND_ASSIGN(V8RuntimeAgentImpl);
};
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index e08af26ad7..f2fc99e389 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -9,6 +9,7 @@
#include "../../third_party/inspector_protocol/crdtp/json.h"
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-impl.h"
+#include "src/tracing/trace-event.h"
using v8_crdtp::SpanFrom;
using v8_crdtp::json::ConvertCBORToJSON;
@@ -34,6 +35,10 @@ std::vector<std::shared_ptr<StackFrame>> toFramesVector(
int maxStackSize) {
DCHECK(debugger->isolate()->InContext());
int frameCount = std::min(v8StackTrace->GetFrameCount(), maxStackSize);
+
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
+ "SymbolizeStackTrace", "frameCount", frameCount);
+
std::vector<std::shared_ptr<StackFrame>> frames(frameCount);
for (int i = 0; i < frameCount; ++i) {
frames[i] =
@@ -253,6 +258,10 @@ std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::create(
std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::capture(
V8Debugger* debugger, int contextGroupId, int maxStackSize) {
DCHECK(debugger);
+
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
+ "V8StackTraceImpl::capture", "maxFrameCount", maxStackSize);
+
v8::Isolate* isolate = debugger->isolate();
v8::HandleScope handleScope(isolate);
v8::Local<v8::StackTrace> v8StackTrace;
@@ -404,6 +413,9 @@ std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
int maxStackSize) {
DCHECK(debugger);
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
+ "AsyncStackTrace::capture", "maxFrameCount", maxStackSize);
+
v8::Isolate* isolate = debugger->isolate();
v8::HandleScope handleScope(isolate);
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 6bfb3dc3e4..e6f66c9821 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -272,6 +272,7 @@ String16 descriptionForRegExp(v8::Isolate* isolate,
v8::RegExp::Flags flags = value->GetFlags();
if (flags & v8::RegExp::Flags::kGlobal) description.append('g');
if (flags & v8::RegExp::Flags::kIgnoreCase) description.append('i');
+ if (flags & v8::RegExp::Flags::kLinear) description.append('l');
if (flags & v8::RegExp::Flags::kMultiline) description.append('m');
if (flags & v8::RegExp::Flags::kDotAll) description.append('s');
if (flags & v8::RegExp::Flags::kUnicode) description.append('u');
@@ -1738,14 +1739,36 @@ String16 descriptionForNode(v8::Local<v8::Context> context,
return description;
}
+String16 descriptionForTrustedType(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value) {
+ if (!value->IsObject()) return String16();
+ v8::Local<v8::Object> object = value.As<v8::Object>();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+
+ v8::Local<v8::String> description;
+ if (!object->ToString(context).ToLocal(&description)) return String16();
+ return toProtocolString(isolate, description);
+}
+
std::unique_ptr<ValueMirror> clientMirror(v8::Local<v8::Context> context,
v8::Local<v8::Value> value,
const String16& subtype) {
// TODO(alph): description and length retrieval should move to embedder.
+ auto descriptionForValueSubtype =
+ clientFor(context)->descriptionForValueSubtype(context, value);
+ if (descriptionForValueSubtype) {
+ return std::make_unique<ObjectMirror>(
+ value, subtype, toString16(descriptionForValueSubtype->string()));
+ }
if (subtype == "node") {
return std::make_unique<ObjectMirror>(value, subtype,
descriptionForNode(context, value));
}
+ if (subtype == "trustedtype") {
+ return std::make_unique<ObjectMirror>(
+ value, subtype, descriptionForTrustedType(context, value));
+ }
if (subtype == "error") {
return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Error,
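clientMirror() now consults the embedder first: if descriptionForValueSubtype() returns a non-null StringBuffer, that description wins for any subtype; otherwise the built-in "node", "trustedtype" and "error" paths apply. A sketch of the embedder side, assuming the hook is a virtual on V8InspectorClient with this shape:

  class MyInspectorClient : public v8_inspector::V8InspectorClient {
    std::unique_ptr<v8_inspector::StringBuffer> descriptionForValueSubtype(
        v8::Local<v8::Context> context, v8::Local<v8::Value> value) override {
      // Returning nullptr falls through to the built-in descriptions in
      // clientMirror(); a non-null buffer overrides them.
      return nullptr;
    }
  };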
diff --git a/deps/v8/src/interpreter/DIR_METADATA b/deps/v8/src/interpreter/DIR_METADATA
new file mode 100644
index 0000000000..3de1f73a3d
--- /dev/null
+++ b/deps/v8/src/interpreter/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Interpreter"
+} 
\ No newline at end of file

diff --git a/deps/v8/src/interpreter/OWNERS b/deps/v8/src/interpreter/OWNERS
index f013999565..481caea50b 100644
--- a/deps/v8/src/interpreter/OWNERS
+++ b/deps/v8/src/interpreter/OWNERS
@@ -1,5 +1,3 @@
leszeks@chromium.org
mythria@chromium.org
rmcilroy@chromium.org
-
-# COMPONENT: Blink>JavaScript>Interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index 86a42a5570..e536c52228 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -91,6 +91,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
BytecodeArrayAccessor(Handle<BytecodeArray> bytecode_array,
int initial_offset);
+ BytecodeArrayAccessor(const BytecodeArrayAccessor&) = delete;
+ BytecodeArrayAccessor& operator=(const BytecodeArrayAccessor&) = delete;
+
void SetOffset(int offset);
void ApplyDebugBreak();
@@ -157,8 +160,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
int bytecode_offset_;
OperandScale operand_scale_;
int prefix_offset_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeArrayAccessor);
};
} // namespace interpreter
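The interpreter headers in the rest of this diff follow one mechanical pattern: DISALLOW_COPY_AND_ASSIGN(X) in a private section is replaced by publicly deleted copy operations next to the constructors. Shown on a hypothetical class:

  class Widget {
   public:
    Widget() = default;
    Widget(const Widget&) = delete;             // was DISALLOW_COPY_AND_ASSIGN(Widget)
    Widget& operator=(const Widget&) = delete;  // in a private: section
  };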
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 74c2065355..dc41db71c4 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -77,8 +77,7 @@ Register BytecodeArrayBuilder::Receiver() const {
}
Register BytecodeArrayBuilder::Local(int index) const {
- // TODO(marja): Make a DCHECK once crbug.com/706234 is fixed.
- CHECK_LT(index, locals_count());
+ DCHECK_LT(index, locals_count());
return Register(index);
}
@@ -1330,6 +1329,12 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ThrowSuperAlreadyCalledIfNotHole() {
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::ThrowIfNotSuperConstructor(
+ Register constructor) {
+ OutputThrowIfNotSuperConstructor(constructor);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Debugger() {
OutputDebugger();
return *this;
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index ff7e9cb21f..b03cebdd60 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -43,6 +43,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
SourcePositionTableBuilder::RecordingMode source_position_mode =
SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS);
+ BytecodeArrayBuilder(const BytecodeArrayBuilder&) = delete;
+ BytecodeArrayBuilder& operator=(const BytecodeArrayBuilder&) = delete;
+
template <typename LocalIsolate>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
Handle<BytecodeArray> ToBytecodeArray(LocalIsolate* isolate);
@@ -459,6 +462,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& ThrowReferenceErrorIfHole(const AstRawString* name);
BytecodeArrayBuilder& ThrowSuperNotCalledIfHole();
BytecodeArrayBuilder& ThrowSuperAlreadyCalledIfNotHole();
+ BytecodeArrayBuilder& ThrowIfNotSuperConstructor(Register constructor);
// Debugger.
BytecodeArrayBuilder& Debugger();
@@ -643,8 +647,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeRegisterOptimizer* register_optimizer_;
BytecodeSourceInfo latest_source_info_;
BytecodeSourceInfo deferred_source_info_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
};
V8_EXPORT_PRIVATE std::ostream& operator<<(
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index b992ffc037..58b0b1a55a 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -20,11 +20,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayIterator final
explicit BytecodeArrayIterator(Handle<BytecodeArray> array);
+ BytecodeArrayIterator(const BytecodeArrayIterator&) = delete;
+ BytecodeArrayIterator& operator=(const BytecodeArrayIterator&) = delete;
+
void Advance();
bool done() const;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(BytecodeArrayIterator);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.h b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
index 68905a146c..99fe758bbb 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
@@ -23,6 +23,10 @@ class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final
BytecodeArrayRandomIterator(Handle<BytecodeArray> bytecode_array, Zone* zone);
+ BytecodeArrayRandomIterator(const BytecodeArrayRandomIterator&) = delete;
+ BytecodeArrayRandomIterator& operator=(const BytecodeArrayRandomIterator&) =
+ delete;
+
BytecodeArrayRandomIterator& operator++() {
++current_index_;
UpdateOffsetFromIndex();
@@ -72,8 +76,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final
void Initialize();
void UpdateOffsetFromIndex();
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeArrayRandomIterator);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index c1f4266e49..6517ad9f5e 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -36,6 +36,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
BytecodeArrayWriter(
Zone* zone, ConstantArrayBuilder* constant_array_builder,
SourcePositionTableBuilder::RecordingMode source_position_mode);
+ BytecodeArrayWriter(const BytecodeArrayWriter&) = delete;
+ BytecodeArrayWriter& operator=(const BytecodeArrayWriter&) = delete;
void Write(BytecodeNode* node);
void WriteJump(BytecodeNode* node, BytecodeLabel* label);
@@ -126,7 +128,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
bool exit_seen_in_block_;
friend class bytecode_array_writer_unittest::BytecodeArrayWriterUnittest;
- DISALLOW_COPY_AND_ASSIGN(BytecodeArrayWriter);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 64ffa39b20..675715420b 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -108,6 +108,8 @@ class BytecodeGenerator::ControlScope {
generator_->set_execution_control(this);
}
virtual ~ControlScope() { generator_->set_execution_control(outer()); }
+ ControlScope(const ControlScope&) = delete;
+ ControlScope& operator=(const ControlScope&) = delete;
void Break(Statement* stmt) {
PerformCommand(CMD_BREAK, stmt, kNoSourcePosition);
@@ -154,8 +156,6 @@ class BytecodeGenerator::ControlScope {
BytecodeGenerator* generator_;
ControlScope* outer_;
ContextScope* context_;
-
- DISALLOW_COPY_AND_ASSIGN(ControlScope);
};
// Helper class for a try-finally control scope. It can record intercepted
@@ -562,13 +562,14 @@ class BytecodeGenerator::RegisterAllocationScope final {
outer_next_register_index_);
}
+ RegisterAllocationScope(const RegisterAllocationScope&) = delete;
+ RegisterAllocationScope& operator=(const RegisterAllocationScope&) = delete;
+
BytecodeGenerator* generator() const { return generator_; }
private:
BytecodeGenerator* generator_;
int outer_next_register_index_;
-
- DISALLOW_COPY_AND_ASSIGN(RegisterAllocationScope);
};
class BytecodeGenerator::AccumulatorPreservingScope final {
@@ -591,11 +592,13 @@ class BytecodeGenerator::AccumulatorPreservingScope final {
}
}
+ AccumulatorPreservingScope(const AccumulatorPreservingScope&) = delete;
+ AccumulatorPreservingScope& operator=(const AccumulatorPreservingScope&) =
+ delete;
+
private:
BytecodeGenerator* generator_;
Register saved_accumulator_register_;
-
- DISALLOW_COPY_AND_ASSIGN(AccumulatorPreservingScope);
};
// Scoped base class for determining how the result of an expression will be
@@ -614,6 +617,9 @@ class BytecodeGenerator::ExpressionResultScope {
allocator_.generator()->set_execution_result(outer_);
}
+ ExpressionResultScope(const ExpressionResultScope&) = delete;
+ ExpressionResultScope& operator=(const ExpressionResultScope&) = delete;
+
bool IsEffect() const { return kind_ == Expression::kEffect; }
bool IsValue() const { return kind_ == Expression::kValue; }
bool IsTest() const { return kind_ == Expression::kTest; }
@@ -641,8 +647,6 @@ class BytecodeGenerator::ExpressionResultScope {
RegisterAllocationScope allocator_;
Expression::Context kind_;
TypeHint type_hint_;
-
- DISALLOW_COPY_AND_ASSIGN(ExpressionResultScope);
};
// Scoped class used when the result of the current expression is not
@@ -674,6 +678,9 @@ class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
then_labels_(then_labels),
else_labels_(else_labels) {}
+ TestResultScope(const TestResultScope&) = delete;
+ TestResultScope& operator=(const TestResultScope&) = delete;
+
// Used when code special cases for TestResultScope and consumes any
// possible value by testing and jumping to a then/else label.
void SetResultConsumedByTest() { result_consumed_by_test_ = true; }
@@ -719,8 +726,6 @@ class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
TestFallthrough fallthrough_;
BytecodeLabels* then_labels_;
BytecodeLabels* else_labels_;
-
- DISALLOW_COPY_AND_ASSIGN(TestResultScope);
};
// Used to build a list of toplevel declaration data.
@@ -2531,7 +2536,7 @@ void BytecodeGenerator::BuildInstanceMemberInitialization(Register constructor,
void BytecodeGenerator::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
size_t entry = builder()->AllocateDeferredConstantPoolEntry();
- int index = feedback_spec()->AddFeedbackCellForCreateClosure();
+ int index = feedback_spec()->AddCreateClosureSlot();
uint8_t flags = CreateClosureFlags::Encode(false, false, false);
builder()->CreateClosure(entry, index, flags);
native_function_literals_.push_back(std::make_pair(expr, entry));
@@ -4590,11 +4595,8 @@ void BytecodeGenerator::VisitThrow(Throw* expr) {
void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
if (property->is_optional_chain_link()) {
DCHECK_NOT_NULL(optional_chaining_null_labels_);
- int right_range =
- AllocateBlockCoverageSlotIfEnabled(property, SourceRangeKind::kRight);
builder()->LoadAccumulatorWithRegister(obj).JumpIfUndefinedOrNull(
optional_chaining_null_labels_->New());
- BuildIncrementBlockCoverageCounterIfEnabled(right_range);
}
AssignType property_kind = Property::GetAssignType(property);
@@ -4924,8 +4926,9 @@ void BytecodeGenerator::VisitCall(Call* expr) {
Property* property = chain->expression()->AsProperty();
BuildOptionalChain([&]() {
VisitAndPushIntoRegisterList(property->obj(), &args);
- VisitPropertyLoadForRegister(args.last_register(), property, callee);
+ VisitPropertyLoad(args.last_register(), property);
});
+ builder()->StoreAccumulatorInRegister(callee);
break;
}
case Call::SUPER_CALL:
@@ -4934,11 +4937,8 @@ void BytecodeGenerator::VisitCall(Call* expr) {
if (expr->is_optional_chain_link()) {
DCHECK_NOT_NULL(optional_chaining_null_labels_);
- int right_range =
- AllocateBlockCoverageSlotIfEnabled(expr, SourceRangeKind::kRight);
builder()->LoadAccumulatorWithRegister(callee).JumpIfUndefinedOrNull(
optional_chaining_null_labels_->New());
- BuildIncrementBlockCoverageCounterIfEnabled(right_range);
}
// Evaluate all arguments to the function call and store in sequential args
@@ -5025,6 +5025,9 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
// First generate the array containing all arguments.
BuildCreateArrayLiteral(args, nullptr);
+ // Check if the constructor is in fact a constructor.
+ builder()->ThrowIfNotSuperConstructor(constructor);
+
// Now pass that array to %reflect_construct.
RegisterList construct_args = register_allocator()->NewRegisterList(3);
builder()->StoreAccumulatorInRegister(construct_args[1]);
@@ -5034,6 +5037,10 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
} else {
RegisterList args_regs = register_allocator()->NewGrowableRegisterList();
VisitArguments(args, &args_regs);
+
+ // Check if the constructor is in fact a constructor.
+ builder()->ThrowIfNotSuperConstructor(constructor);
+
// The new target is loaded into the accumulator from the
// {new.target} variable.
VisitForAccumulatorValue(super->new_target_var());
@@ -5210,10 +5217,7 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* unary) {
OptionalChainNullLabelScope label_scope(this);
VisitForAccumulatorValue(property->obj());
if (property->is_optional_chain_link()) {
- int right_range = AllocateBlockCoverageSlotIfEnabled(
- property, SourceRangeKind::kRight);
builder()->JumpIfUndefinedOrNull(label_scope.labels()->New());
- BuildIncrementBlockCoverageCounterIfEnabled(right_range);
}
Register object = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(object);
@@ -6660,7 +6664,7 @@ int BytecodeGenerator::GetCachedCreateClosureSlot(FunctionLiteral* literal) {
if (index != -1) {
return index;
}
- index = feedback_spec()->AddFeedbackCellForCreateClosure();
+ index = feedback_spec()->AddCreateClosureSlot();
feedback_slot_cache()->Put(slot_kind, literal, index);
return index;
}
diff --git a/deps/v8/src/interpreter/bytecode-label.h b/deps/v8/src/interpreter/bytecode-label.h
index 4581f4f4e2..1c9d0e9d6b 100644
--- a/deps/v8/src/interpreter/bytecode-label.h
+++ b/deps/v8/src/interpreter/bytecode-label.h
@@ -84,6 +84,8 @@ class V8_EXPORT_PRIVATE BytecodeLabel final {
class V8_EXPORT_PRIVATE BytecodeLabels {
public:
explicit BytecodeLabels(Zone* zone) : labels_(zone), is_bound_(false) {}
+ BytecodeLabels(const BytecodeLabels&) = delete;
+ BytecodeLabels& operator=(const BytecodeLabels&) = delete;
BytecodeLabel* New();
@@ -103,8 +105,6 @@ class V8_EXPORT_PRIVATE BytecodeLabels {
private:
ZoneLinkedList<BytecodeLabel> labels_;
bool is_bound_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeLabels);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.h b/deps/v8/src/interpreter/bytecode-register-allocator.h
index b270e3d38b..442e3d27aa 100644
--- a/deps/v8/src/interpreter/bytecode-register-allocator.h
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.h
@@ -30,6 +30,9 @@ class BytecodeRegisterAllocator final {
max_register_count_(start_index),
observer_(nullptr) {}
~BytecodeRegisterAllocator() = default;
+ BytecodeRegisterAllocator(const BytecodeRegisterAllocator&) = delete;
+ BytecodeRegisterAllocator& operator=(const BytecodeRegisterAllocator&) =
+ delete;
// Returns a new register.
Register NewRegister() {
@@ -101,8 +104,6 @@ class BytecodeRegisterAllocator final {
int next_register_index_;
int max_register_count_;
Observer* observer_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeRegisterAllocator);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index e3bbfaa6f9..3d9c9e1dac 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -24,6 +24,8 @@ class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
needs_flush_(false),
next_(this),
prev_(this) {}
+ RegisterInfo(const RegisterInfo&) = delete;
+ RegisterInfo& operator=(const RegisterInfo&) = delete;
void AddToEquivalenceSetOf(RegisterInfo* info);
void MoveToNewEquivalenceSet(uint32_t equivalence_id, bool materialized);
@@ -85,8 +87,6 @@ class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
// Equivalence set pointers.
RegisterInfo* next_;
RegisterInfo* prev_;
-
- DISALLOW_COPY_AND_ASSIGN(RegisterInfo);
};
void BytecodeRegisterOptimizer::RegisterInfo::AddToEquivalenceSetOf(
@@ -233,11 +233,7 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
// a vector of register metadata.
// There is at least one parameter, which is the JS receiver.
DCHECK_NE(parameter_count, 0);
-#ifdef V8_REVERSE_JSARGS
int first_slot_index = parameter_count - 1;
-#else
- int first_slot_index = 0;
-#endif
register_info_table_offset_ =
-Register::FromParameterIndex(first_slot_index, parameter_count).index();
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index 674a4e3ac5..289b8983f3 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -25,14 +25,13 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
public:
BytecodeWriter() = default;
virtual ~BytecodeWriter() = default;
+ BytecodeWriter(const BytecodeWriter&) = delete;
+ BytecodeWriter& operator=(const BytecodeWriter&) = delete;
// Called to emit a register transfer bytecode.
virtual void EmitLdar(Register input) = 0;
virtual void EmitStar(Register output) = 0;
virtual void EmitMov(Register input, Register output) = 0;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(BytecodeWriter);
};
BytecodeRegisterOptimizer(Zone* zone,
@@ -40,6 +39,9 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
int fixed_registers_count, int parameter_count,
BytecodeWriter* bytecode_writer);
~BytecodeRegisterOptimizer() override = default;
+ BytecodeRegisterOptimizer(const BytecodeRegisterOptimizer&) = delete;
+ BytecodeRegisterOptimizer& operator=(const BytecodeRegisterOptimizer&) =
+ delete;
// Perform explicit register transfer operations.
void DoLdar(Register input) {
@@ -201,8 +203,6 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
BytecodeWriter* bytecode_writer_;
bool flush_required_;
Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeRegisterOptimizer);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-register.cc b/deps/v8/src/interpreter/bytecode-register.cc
index 13d831e8b7..e8eb347f16 100644
--- a/deps/v8/src/interpreter/bytecode-register.cc
+++ b/deps/v8/src/interpreter/bytecode-register.cc
@@ -8,17 +8,10 @@ namespace v8 {
namespace internal {
namespace interpreter {
-#ifdef V8_REVERSE_JSARGS
static const int kFirstParamRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kFirstParamFromFp) /
kSystemPointerSize;
-#else
-static const int kLastParamRegisterIndex =
- (InterpreterFrameConstants::kRegisterFileFromFp -
- InterpreterFrameConstants::kLastParamFromFp) /
- kSystemPointerSize;
-#endif
static const int kFunctionClosureRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
StandardFrameConstants::kFunctionOffset) /
@@ -43,22 +36,14 @@ static const int kCallerPCOffsetRegisterIndex =
Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_GE(index, 0);
DCHECK_LT(index, parameter_count);
-#ifdef V8_REVERSE_JSARGS
int register_index = kFirstParamRegisterIndex - index;
-#else
- int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
-#endif
DCHECK_LT(register_index, 0);
return Register(register_index);
}
int Register::ToParameterIndex(int parameter_count) const {
DCHECK(is_parameter());
-#ifdef V8_REVERSE_JSARGS
return kFirstParamRegisterIndex - index();
-#else
- return index() - kLastParamRegisterIndex + parameter_count - 1;
-#endif
}
Register Register::function_closure() {
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index fe22559f18..2cff678920 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -345,6 +345,7 @@ namespace interpreter {
V(ThrowReferenceErrorIfHole, AccumulatorUse::kRead, OperandType::kIdx) \
V(ThrowSuperNotCalledIfHole, AccumulatorUse::kRead) \
V(ThrowSuperAlreadyCalledIfNotHole, AccumulatorUse::kRead) \
+ V(ThrowIfNotSuperConstructor, AccumulatorUse::kNone, OperandType::kReg) \
\
/* Generators */ \
V(SwitchOnGeneratorState, AccumulatorUse::kNone, OperandType::kReg, \
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index a44ce0b7a1..87fe0559ae 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -198,6 +198,9 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
struct ConstantArraySlice final : public ZoneObject {
ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity,
OperandSize operand_size);
+ ConstantArraySlice(const ConstantArraySlice&) = delete;
+ ConstantArraySlice& operator=(const ConstantArraySlice&) = delete;
+
void Reserve();
void Unreserve();
size_t Allocate(Entry entry, size_t count = 1);
@@ -223,8 +226,6 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
size_t reserved_;
OperandSize operand_size_;
ZoneVector<Entry> constants_;
-
- DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
};
ConstantArraySlice* IndexToSlice(size_t index) const;
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index 9bf2b17258..f5f264d7c7 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -25,6 +25,8 @@ namespace interpreter {
class V8_EXPORT_PRIVATE HandlerTableBuilder final {
public:
explicit HandlerTableBuilder(Zone* zone);
+ HandlerTableBuilder(const HandlerTableBuilder&) = delete;
+ HandlerTableBuilder& operator=(const HandlerTableBuilder&) = delete;
// Builds the actual handler table by copying the current values into a heap
// object. Any further mutations to the builder won't be reflected.
@@ -55,8 +57,6 @@ class V8_EXPORT_PRIVATE HandlerTableBuilder final {
};
ZoneVector<Entry> entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HandlerTableBuilder);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index e6fd97ddf2..596783b64f 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -21,7 +21,6 @@ namespace internal {
namespace interpreter {
using compiler::CodeAssemblerState;
-using compiler::Node;
InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
Bytecode bytecode,
@@ -30,19 +29,19 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
bytecode_(bytecode),
operand_scale_(operand_scale),
TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_),
- TVARIABLE_CONSTRUCTOR(
- bytecode_array_,
- CAST(Parameter(InterpreterDispatchDescriptor::kBytecodeArray))),
+ TVARIABLE_CONSTRUCTOR(bytecode_array_,
+ Parameter<BytecodeArray>(
+ InterpreterDispatchDescriptor::kBytecodeArray)),
TVARIABLE_CONSTRUCTOR(
bytecode_offset_,
- UncheckedCast<IntPtrT>(
- Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))),
- TVARIABLE_CONSTRUCTOR(
- dispatch_table_, UncheckedCast<ExternalReference>(Parameter(
- InterpreterDispatchDescriptor::kDispatchTable))),
+ UncheckedParameter<IntPtrT>(
+ InterpreterDispatchDescriptor::kBytecodeOffset)),
+ TVARIABLE_CONSTRUCTOR(dispatch_table_,
+ UncheckedParameter<ExternalReference>(
+ InterpreterDispatchDescriptor::kDispatchTable)),
TVARIABLE_CONSTRUCTOR(
accumulator_,
- CAST(Parameter(InterpreterDispatchDescriptor::kAccumulator))),
+ Parameter<Object>(InterpreterDispatchDescriptor::kAccumulator)),
accumulator_use_(AccumulatorUse::kNone),
made_call_(false),
reloaded_frame_ptr_(false),
@@ -83,7 +82,8 @@ TNode<RawPtrT> InterpreterAssembler::GetInterpretedFramePointer() {
TNode<IntPtrT> InterpreterAssembler::BytecodeOffset() {
if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
(bytecode_offset_.value() ==
- Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
+ UncheckedParameter<IntPtrT>(
+ InterpreterDispatchDescriptor::kBytecodeOffset))) {
bytecode_offset_ = ReloadBytecodeOffset();
}
return bytecode_offset_.value();
@@ -140,7 +140,8 @@ TNode<BytecodeArray> InterpreterAssembler::BytecodeArrayTaggedPointer() {
TNode<ExternalReference> InterpreterAssembler::DispatchTablePointer() {
if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
(dispatch_table_.value() ==
- Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
+ UncheckedParameter<ExternalReference>(
+ InterpreterDispatchDescriptor::kDispatchTable))) {
dispatch_table_ = ExternalConstant(
ExternalReference::interpreter_dispatch_table_address(isolate()));
}
@@ -772,15 +773,9 @@ void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// The first argument parameter (the receiver) is implied to be undefined.
-#ifdef V8_REVERSE_JSARGS
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
context, function, arg_count, args...,
UndefinedConstant());
-#else
- TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
- context, function, arg_count,
- UndefinedConstant(), args...);
-#endif
} else {
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
context, function, arg_count, args...);
@@ -846,10 +841,9 @@ TNode<Object> InterpreterAssembler::Construct(
Comment("call using Construct builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kOther);
- TNode<Code> code_target = HeapConstant(callable.code());
- var_result = CallStub(callable.descriptor(), code_target, context,
- args.reg_count(), args.base_reg_location(), target,
- new_target, UndefinedConstant());
+ var_result =
+ CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ target, new_target, UndefinedConstant());
Goto(&return_result);
}
@@ -860,10 +854,9 @@ TNode<Object> InterpreterAssembler::Construct(
Comment("call using ConstructArray builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kArrayFunction);
- TNode<Code> code_target = HeapConstant(callable.code());
- var_result = CallStub(callable.descriptor(), code_target, context,
- args.reg_count(), args.base_reg_location(), target,
- new_target, var_site.value());
+ var_result =
+ CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ target, new_target, var_site.value());
Goto(&return_result);
}
@@ -988,19 +981,18 @@ TNode<Object> InterpreterAssembler::ConstructWithSpread(
Comment("call using ConstructWithSpread builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kWithFinalSpread);
- TNode<Code> code_target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), code_target, context, args.reg_count(),
- args.base_reg_location(), target, new_target,
- UndefinedConstant());
+ return CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ target, new_target, UndefinedConstant());
}
-Node* InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
- TNode<Context> context,
- const RegListNodePair& args,
- int result_size) {
+template <class T>
+TNode<T> InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
+ TNode<Context> context,
+ const RegListNodePair& args,
+ int return_count) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallRuntime(bytecode_));
- Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
+ Callable callable = CodeFactory::InterpreterCEntry(isolate(), return_count);
TNode<Code> code_target = HeapConstant(callable.code());
// Get the function entry from the function id.
@@ -1013,11 +1005,20 @@ Node* InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
TNode<RawPtrT> function_entry = Load<RawPtrT>(
function, IntPtrConstant(offsetof(Runtime::Function, entry)));
- return CallStubR(StubCallMode::kCallCodeObject, callable.descriptor(),
- result_size, code_target, context, args.reg_count(),
- args.base_reg_location(), function_entry);
+ return CallStub<T>(callable.descriptor(), code_target, context,
+ args.reg_count(), args.base_reg_location(),
+ function_entry);
}
+template V8_EXPORT_PRIVATE TNode<Object> InterpreterAssembler::CallRuntimeN(
+ TNode<Uint32T> function_id, TNode<Context> context,
+ const RegListNodePair& args, int return_count);
+template V8_EXPORT_PRIVATE TNode<PairT<Object, Object>>
+InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
+ TNode<Context> context,
+ const RegListNodePair& args,
+ int return_count);
+
void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight,
bool backward) {
Comment("[ UpdateInterruptBudget");
@@ -1399,14 +1400,8 @@ TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
// Iterate over parameters and write them into the array.
Label loop(this, &var_index), done_loop(this);
-#ifdef V8_REVERSE_JSARGS
TNode<IntPtrT> reg_base =
IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() + 1);
-#else
- TNode<IntPtrT> reg_base = IntPtrAdd(
- IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() - 1),
- formal_parameter_count_intptr);
-#endif
Goto(&loop);
BIND(&loop);
@@ -1415,11 +1410,7 @@ TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
&done_loop);
-#ifdef V8_REVERSE_JSARGS
TNode<IntPtrT> reg_index = IntPtrAdd(reg_base, index);
-#else
- TNode<IntPtrT> reg_index = IntPtrSub(reg_base, index);
-#endif
TNode<Object> value = LoadRegister(reg_index);
StoreFixedArrayElement(array, index, value);
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 729e23c7a6..2884aaed1a 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -22,6 +22,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
InterpreterAssembler(compiler::CodeAssemblerState* state, Bytecode bytecode,
OperandScale operand_scale);
~InterpreterAssembler();
+ InterpreterAssembler(const InterpreterAssembler&) = delete;
+ InterpreterAssembler& operator=(const InterpreterAssembler&) = delete;
// Returns the 32-bit unsigned count immediate for bytecode operand
// |operand_index| in the current bytecode.
@@ -191,12 +193,10 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
TNode<UintPtrT> slot_id,
TNode<HeapObject> maybe_feedback_vector);
- // Call runtime function with |args| arguments which will return |return_size|
- // number of values.
- compiler::Node* CallRuntimeN(TNode<Uint32T> function_id,
- TNode<Context> context,
- const RegListNodePair& args,
- int return_size = 1);
+ // Call runtime function with |args| arguments.
+ template <class T = Object>
+ TNode<T> CallRuntimeN(TNode<Uint32T> function_id, TNode<Context> context,
+ const RegListNodePair& args, int return_count);
// Jump forward relative to the current bytecode by the |jump_offset|.
void Jump(TNode<IntPtrT> jump_offset);
@@ -402,8 +402,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
bool made_call_;
bool reloaded_frame_ptr_;
bool bytecode_array_valid_;
-
- DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
};
} // namespace interpreter
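CallRuntimeN() is now a template returning a typed TNode<T> (T defaults to Object), with explicit instantiations for the single-result and paired-result cases, so callers no longer CAST an untyped Node*. The two call shapes used by the handlers below, sketched:

  // One return value:
  TNode<Object> result = CallRuntimeN(function_id, context, args, 1);
  // Two return values, unpacked with the typed Projection<>:
  auto pair = CallRuntimeN<PairT<Object, Object>>(function_id, context, args, 2);
  TNode<Object> first = Projection<0>(pair);
  TNode<Object> second = Projection<1>(pair);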
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index b3ca4a1b9c..3b7172867e 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -36,7 +36,6 @@ namespace interpreter {
namespace {
using compiler::CodeAssemblerState;
-using compiler::Node;
using Label = CodeStubAssembler::Label;
#define IGNITION_HANDLER(Name, BaseAssembler) \
@@ -45,12 +44,13 @@ using Label = CodeStubAssembler::Label;
explicit Name##Assembler(compiler::CodeAssemblerState* state, \
Bytecode bytecode, OperandScale scale) \
: BaseAssembler(state, bytecode, scale) {} \
+ Name##Assembler(const Name##Assembler&) = delete; \
+ Name##Assembler& operator=(const Name##Assembler&) = delete; \
static void Generate(compiler::CodeAssemblerState* state, \
OperandScale scale); \
\
private: \
void GenerateImpl(); \
- DISALLOW_COPY_AND_ASSIGN(Name##Assembler); \
}; \
void Name##Assembler::Generate(compiler::CodeAssemblerState* state, \
OperandScale scale) { \
@@ -600,7 +600,6 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
: InterpreterAssembler(state, bytecode, operand_scale) {}
void StaNamedProperty(Callable ic, NamedPropertyType property_type) {
- TNode<Code> code_target = HeapConstant(ic.code());
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Object> value = GetAccumulator();
@@ -609,8 +608,7 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
- var_result = CallStub(ic.descriptor(), code_target, context, object, name,
- value, slot, maybe_vector);
+ var_result = CallStub(ic, context, object, name, value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -1340,8 +1338,7 @@ IGNITION_HANDLER(DeletePropertySloppy, InterpreterAssembler) {
// The result is stored in register |reg|.
IGNITION_HANDLER(GetSuperConstructor, InterpreterAssembler) {
TNode<JSFunction> active_function = CAST(GetAccumulator());
- TNode<Context> context = GetContext();
- TNode<Object> result = GetSuperConstructor(context, active_function);
+ TNode<Object> result = GetSuperConstructor(active_function);
StoreRegisterAtOperandIndex(result, 0);
Dispatch();
}
@@ -1407,32 +1404,17 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
break;
case 2:
-#ifdef V8_REVERSE_JSARGS
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
-#else
- CallJSAndDispatch(
- function, context, Int32Constant(arg_count), receiver_mode,
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1));
-#endif
break;
case 3:
-#ifdef V8_REVERSE_JSARGS
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2),
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
-#else
- CallJSAndDispatch(
- function, context, Int32Constant(arg_count), receiver_mode,
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2));
-#endif
break;
default:
UNREACHABLE();
@@ -1494,7 +1476,7 @@ IGNITION_HANDLER(CallRuntime, InterpreterAssembler) {
TNode<Uint32T> function_id = BytecodeOperandRuntimeId(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
TNode<Context> context = GetContext();
- TNode<Object> result = CAST(CallRuntimeN(function_id, context, args));
+ TNode<Object> result = CallRuntimeN(function_id, context, args, 1);
SetAccumulator(result);
Dispatch();
}
@@ -1525,10 +1507,11 @@ IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) {
TNode<Uint32T> function_id = BytecodeOperandRuntimeId(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
TNode<Context> context = GetContext();
- Node* result_pair = CallRuntimeN(function_id, context, args, 2);
+ auto result_pair =
+ CallRuntimeN<PairT<Object, Object>>(function_id, context, args, 2);
// Store the results in <first_return> and <first_return + 1>
- TNode<Object> result0 = CAST(Projection(0, result_pair));
- TNode<Object> result1 = CAST(Projection(1, result_pair));
+ TNode<Object> result0 = Projection<0>(result_pair);
+ TNode<Object> result1 = Projection<1>(result_pair);
StoreRegisterPairAtOperandIndex(result0, result1, 3);
Dispatch();
}
@@ -2209,8 +2192,7 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
BIND(&osr_armed);
{
Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
- TNode<Code> target = HeapConstant(callable.code());
- CallStub(callable.descriptor(), target, context);
+ CallStub(callable, context);
JumpBackward(relative_jump);
}
}
@@ -2737,7 +2719,7 @@ IGNITION_HANDLER(ThrowSuperNotCalledIfHole, InterpreterAssembler) {
// ThrowSuperAlreadyCalledIfNotHole
//
-// Throws SuperAleradyCalled exception if the value in the accumulator is not
+// Throws SuperAlreadyCalled exception if the value in the accumulator is not
// TheHole.
IGNITION_HANDLER(ThrowSuperAlreadyCalledIfNotHole, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
@@ -2755,6 +2737,31 @@ IGNITION_HANDLER(ThrowSuperAlreadyCalledIfNotHole, InterpreterAssembler) {
}
}
+// ThrowIfNotSuperConstructor <constructor>
+//
+// Throws an exception if the value in |constructor| is not in fact a
+// constructor.
+IGNITION_HANDLER(ThrowIfNotSuperConstructor, InterpreterAssembler) {
+ TNode<HeapObject> constructor = CAST(LoadRegisterAtOperandIndex(0));
+ TNode<Context> context = GetContext();
+
+ Label is_not_constructor(this, Label::kDeferred);
+ TNode<Map> constructor_map = LoadMap(constructor);
+ GotoIfNot(IsConstructorMap(constructor_map), &is_not_constructor);
+ Dispatch();
+
+ BIND(&is_not_constructor);
+ {
+ TNode<JSFunction> function =
+ CAST(LoadRegister(Register::function_closure()));
+ CallRuntime(Runtime::kThrowNotSuperConstructor, context, constructor,
+ function);
+ // We shouldn't ever return from a throw.
+ Abort(AbortReason::kUnexpectedReturnFromThrow);
+ Unreachable();
+ }
+}
+
// Debugger
//
// Call runtime to handle debugger statement.
@@ -2771,10 +2778,10 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) {
IGNITION_HANDLER(Name, InterpreterAssembler) { \
TNode<Context> context = GetContext(); \
TNode<Object> accumulator = GetAccumulator(); \
- TNode<Object> result_pair = \
- CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
- TNode<Object> return_value = CAST(Projection(0, result_pair)); \
- TNode<IntPtrT> original_bytecode = SmiUntag(Projection(1, result_pair)); \
+ TNode<PairT<Object, Smi>> result_pair = CallRuntime<PairT<Object, Smi>>( \
+ Runtime::kDebugBreakOnBytecode, context, accumulator); \
+ TNode<Object> return_value = Projection<0>(result_pair); \
+ TNode<IntPtrT> original_bytecode = SmiUntag(Projection<1>(result_pair)); \
MaybeDropFrames(context); \
SetAccumulator(return_value); \
DispatchToBytecode(original_bytecode, BytecodeOffset()); \
@@ -2841,57 +2848,14 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
TNode<UintPtrT> vector_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
- // Check if we're using an enum cache.
- Label if_fast(this), if_slow(this);
- Branch(IsMap(enumerator), &if_fast, &if_slow);
-
- BIND(&if_fast);
- {
- // Load the enumeration length and cache from the {enumerator}.
- TNode<Map> map_enumerator = CAST(enumerator);
- TNode<WordT> enum_length = LoadMapEnumLength(map_enumerator);
- CSA_ASSERT(this, WordNotEqual(enum_length,
- IntPtrConstant(kInvalidEnumCacheSentinel)));
- TNode<DescriptorArray> descriptors = LoadMapDescriptors(map_enumerator);
- TNode<EnumCache> enum_cache = LoadObjectField<EnumCache>(
- descriptors, DescriptorArray::kEnumCacheOffset);
- TNode<FixedArray> enum_keys =
- LoadObjectField<FixedArray>(enum_cache, EnumCache::kKeysOffset);
-
- // Check if we have enum indices available.
- TNode<FixedArray> enum_indices =
- LoadObjectField<FixedArray>(enum_cache, EnumCache::kIndicesOffset);
- TNode<IntPtrT> enum_indices_length =
- LoadAndUntagFixedArrayBaseLength(enum_indices);
- TNode<Smi> feedback = SelectSmiConstant(
- IntPtrLessThanOrEqual(enum_length, enum_indices_length),
- ForInFeedback::kEnumCacheKeysAndIndices, ForInFeedback::kEnumCacheKeys);
- UpdateFeedback(feedback, maybe_feedback_vector, vector_index);
-
- // Construct the cache info triple.
- TNode<Map> cache_type = map_enumerator;
- TNode<FixedArray> cache_array = enum_keys;
- TNode<Smi> cache_length = SmiTag(Signed(enum_length));
- StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
- Dispatch();
- }
+ TNode<HeapObject> cache_type = enumerator; // Just to clarify the rename.
+ TNode<FixedArray> cache_array;
+ TNode<Smi> cache_length;
+ ForInPrepare(enumerator, vector_index, maybe_feedback_vector, &cache_array,
+ &cache_length);
- BIND(&if_slow);
- {
- // The {enumerator} is a FixedArray with all the keys to iterate.
- TNode<FixedArray> array_enumerator = CAST(enumerator);
-
- // Record the fact that we hit the for-in slow-path.
- UpdateFeedback(SmiConstant(ForInFeedback::kAny), maybe_feedback_vector,
- vector_index);
-
- // Construct the cache info triple.
- TNode<FixedArray> cache_type = array_enumerator;
- TNode<FixedArray> cache_array = array_enumerator;
- TNode<Smi> cache_length = LoadFixedArrayBaseLength(array_enumerator);
- StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
- Dispatch();
- }
+ StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
+ Dispatch();
}
// ForInNext <receiver> <index> <cache_info_pair>
@@ -2921,14 +2885,9 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
}
BIND(&if_slow);
{
- // Record the fact that we hit the for-in slow-path.
- UpdateFeedback(SmiConstant(ForInFeedback::kAny), maybe_feedback_vector,
- vector_index);
-
- // Need to filter the {key} for the {receiver}.
- TNode<Context> context = GetContext();
TNode<Object> result =
- CallBuiltin(Builtins::kForInFilter, context, key, receiver);
+ ForInNextSlow(GetContext(), vector_index, receiver, key, cache_type,
+ maybe_feedback_vector);
SetAccumulator(result);
Dispatch();
}
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index 03ca61de52..b9975e66ea 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -20,14 +20,14 @@ namespace v8 {
namespace internal {
namespace interpreter {
-using compiler::Node;
-
class IntrinsicsGenerator {
public:
explicit IntrinsicsGenerator(InterpreterAssembler* assembler)
: isolate_(assembler->isolate()),
zone_(assembler->zone()),
assembler_(assembler) {}
+ IntrinsicsGenerator(const IntrinsicsGenerator&) = delete;
+ IntrinsicsGenerator& operator=(const IntrinsicsGenerator&) = delete;
TNode<Object> InvokeIntrinsic(
TNode<Uint32T> function_id, TNode<Context> context,
@@ -42,17 +42,14 @@ class IntrinsicsGenerator {
TNode<Oddball> IsInstanceType(TNode<Object> input, int type);
TNode<BoolT> CompareInstanceType(TNode<HeapObject> map, int type,
InstanceTypeCompareMode mode);
- TNode<Object> IntrinsicAsStubCall(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
- Callable const& callable);
TNode<Object> IntrinsicAsBuiltinCall(
const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
- Builtins::Name name);
+ Builtins::Name name, int arg_count);
void AbortIfArgCountMismatch(int expected, TNode<Word32T> actual);
#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
TNode<Object> name(const InterpreterAssembler::RegListNodePair& args, \
- TNode<Context> context);
+ TNode<Context> context, int arg_count);
INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER)
#undef DECLARE_INTRINSIC_HELPER
@@ -63,8 +60,6 @@ class IntrinsicsGenerator {
Isolate* isolate_;
Zone* zone_;
InterpreterAssembler* assembler_;
-
- DISALLOW_COPY_AND_ASSIGN(IntrinsicsGenerator);
};
TNode<Object> GenerateInvokeIntrinsic(
@@ -103,7 +98,7 @@ TNode<Object> IntrinsicsGenerator::InvokeIntrinsic(
if (FLAG_debug_code && expected_arg_count >= 0) { \
AbortIfArgCountMismatch(expected_arg_count, args.reg_count()); \
} \
- TNode<Object> value = name(args, context); \
+ TNode<Object> value = name(args, context, expected_arg_count); \
if (value) { \
result = value; \
__ Goto(&end); \
@@ -146,8 +141,34 @@ TNode<Oddball> IntrinsicsGenerator::IsInstanceType(TNode<Object> input,
return result;
}
+TNode<Object> IntrinsicsGenerator::IntrinsicAsBuiltinCall(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ Builtins::Name name, int arg_count) {
+ Callable callable = Builtins::CallableFor(isolate_, name);
+ switch (arg_count) {
+ case 1:
+ return __ CallStub(callable, context,
+ __ LoadRegisterFromRegisterList(args, 0));
+ break;
+ case 2:
+ return __ CallStub(callable, context,
+ __ LoadRegisterFromRegisterList(args, 0),
+ __ LoadRegisterFromRegisterList(args, 1));
+ break;
+ case 3:
+ return __ CallStub(callable, context,
+ __ LoadRegisterFromRegisterList(args, 0),
+ __ LoadRegisterFromRegisterList(args, 1),
+ __ LoadRegisterFromRegisterList(args, 2));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
TNode<Object> IntrinsicsGenerator::IsJSReceiver(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0);
TNode<Oddball> result = __ Select<Oddball>(
__ TaggedIsSmi(input), [=] { return __ FalseConstant(); },
@@ -158,81 +179,61 @@ TNode<Object> IntrinsicsGenerator::IsJSReceiver(
}
TNode<Object> IntrinsicsGenerator::IsArray(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_ARRAY_TYPE);
}
TNode<Object> IntrinsicsGenerator::IsSmi(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0);
return __ SelectBooleanConstant(__ TaggedIsSmi(input));
}
-TNode<Object> IntrinsicsGenerator::IntrinsicAsStubCall(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
- Callable const& callable) {
- int param_count = callable.descriptor().GetParameterCount();
- int input_count = param_count + 2; // +2 for target and context
- Node** stub_args = zone()->NewArray<Node*>(input_count);
- int index = 0;
- stub_args[index++] = __ HeapConstant(callable.code());
- for (int i = 0; i < param_count; i++) {
- stub_args[index++] = __ LoadRegisterFromRegisterList(args, i);
- }
- stub_args[index++] = context;
- return __ CAST(__ CallStubN(StubCallMode::kCallCodeObject,
- callable.descriptor(), 1, input_count,
- stub_args));
-}
-
-TNode<Object> IntrinsicsGenerator::IntrinsicAsBuiltinCall(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
- Builtins::Name name) {
- Callable callable = Builtins::CallableFor(isolate_, name);
- return IntrinsicAsStubCall(args, context, callable);
-}
-
TNode<Object> IntrinsicsGenerator::CopyDataProperties(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsStubCall(
- args, context,
- Builtins::CallableFor(isolate(), Builtins::kCopyDataProperties));
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kCopyDataProperties,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::CreateIterResultObject(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsStubCall(
- args, context,
- Builtins::CallableFor(isolate(), Builtins::kCreateIterResultObject));
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kCreateIterResultObject, arg_count);
}
TNode<Object> IntrinsicsGenerator::HasProperty(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsStubCall(
- args, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kHasProperty,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::ToString(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsStubCall(
- args, context, Builtins::CallableFor(isolate(), Builtins::kToString));
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kToString, arg_count);
}
TNode<Object> IntrinsicsGenerator::ToLength(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsStubCall(
- args, context, Builtins::CallableFor(isolate(), Builtins::kToLength));
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kToLength, arg_count);
}
TNode<Object> IntrinsicsGenerator::ToObject(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsStubCall(
- args, context, Builtins::CallableFor(isolate(), Builtins::kToObject));
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kToObject, arg_count);
}
TNode<Object> IntrinsicsGenerator::Call(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
// First argument register contains the function target.
TNode<Object> function = __ LoadRegisterFromRegisterList(args, 0);
@@ -258,7 +259,8 @@ TNode<Object> IntrinsicsGenerator::Call(
}
TNode<Object> IntrinsicsGenerator::CreateAsyncFromSyncIterator(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
InterpreterAssembler::Label not_receiver(
assembler_, InterpreterAssembler::Label::kDeferred);
InterpreterAssembler::Label done(assembler_);
@@ -299,13 +301,15 @@ TNode<Object> IntrinsicsGenerator::CreateAsyncFromSyncIterator(
}
TNode<Object> IntrinsicsGenerator::CreateJSGeneratorObject(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context,
- Builtins::kCreateGeneratorObject);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kCreateGeneratorObject,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::GeneratorGetResumeMode(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
TNode<JSGeneratorObject> generator =
__ CAST(__ LoadRegisterFromRegisterList(args, 0));
const TNode<Object> value =
@@ -315,7 +319,8 @@ TNode<Object> IntrinsicsGenerator::GeneratorGetResumeMode(
}
TNode<Object> IntrinsicsGenerator::GeneratorClose(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
TNode<JSGeneratorObject> generator =
__ CAST(__ LoadRegisterFromRegisterList(args, 0));
__ StoreObjectFieldNoWriteBarrier(
@@ -325,7 +330,8 @@ TNode<Object> IntrinsicsGenerator::GeneratorClose(
}
TNode<Object> IntrinsicsGenerator::GetImportMetaObject(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
const TNode<Context> module_context = __ LoadModuleContext(context);
const TNode<HeapObject> module =
__ CAST(__ LoadContextElement(module_context, Context::EXTENSION_INDEX));
@@ -346,58 +352,73 @@ TNode<Object> IntrinsicsGenerator::GetImportMetaObject(
}
TNode<Object> IntrinsicsGenerator::AsyncFunctionAwaitCaught(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
return IntrinsicAsBuiltinCall(args, context,
- Builtins::kAsyncFunctionAwaitCaught);
+ Builtins::kAsyncFunctionAwaitCaught, arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncFunctionAwaitUncaught(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context,
- Builtins::kAsyncFunctionAwaitUncaught);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(
+ args, context, Builtins::kAsyncFunctionAwaitUncaught, arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncFunctionEnter(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionEnter);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionEnter,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncFunctionReject(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionReject);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionReject,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncFunctionResolve(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionResolve);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionResolve,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncGeneratorAwaitCaught(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context,
- Builtins::kAsyncGeneratorAwaitCaught);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(
+ args, context, Builtins::kAsyncGeneratorAwaitCaught, arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncGeneratorAwaitUncaught(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context,
- Builtins::kAsyncGeneratorAwaitUncaught);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(
+ args, context, Builtins::kAsyncGeneratorAwaitUncaught, arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncGeneratorReject(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorReject);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorReject,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncGeneratorResolve(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context,
- Builtins::kAsyncGeneratorResolve);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorResolve,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncGeneratorYield(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorYield);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorYield,
+ arg_count);
}
void IntrinsicsGenerator::AbortIfArgCountMismatch(int expected,
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 702c0474bc..40fdc60337 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -35,6 +35,9 @@ class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals);
+ InterpreterCompilationJob(const InterpreterCompilationJob&) = delete;
+ InterpreterCompilationJob& operator=(const InterpreterCompilationJob&) =
+ delete;
protected:
Status ExecuteJobImpl() final;
@@ -57,8 +60,6 @@ class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
Zone zone_;
UnoptimizedCompilationInfo compilation_info_;
BytecodeGenerator generator_;
-
- DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
};
Interpreter::Interpreter(Isolate* isolate)
@@ -78,12 +79,21 @@ Interpreter::Interpreter(Isolate* isolate)
namespace {
int BuiltinIndexFromBytecode(Bytecode bytecode, OperandScale operand_scale) {
- int index = BytecodeOperands::OperandScaleAsIndex(operand_scale) *
- kNumberOfBytecodeHandlers +
- static_cast<int>(bytecode);
- int offset = kBytecodeToBuiltinsMapping[index];
- return offset >= 0 ? Builtins::kFirstBytecodeHandler + offset
- : Builtins::kIllegalHandler;
+ int index = static_cast<int>(bytecode);
+ if (operand_scale != OperandScale::kSingle) {
+ // The table contains uint8_t offsets starting at 0 with
+ // kIllegalBytecodeHandlerEncoding for illegal bytecode/scale combinations.
+ uint8_t offset = kWideBytecodeToBuiltinsMapping[index];
+ if (offset == kIllegalBytecodeHandlerEncoding) {
+ return Builtins::kIllegalHandler;
+ } else {
+ index = kNumberOfBytecodeHandlers + offset;
+ if (operand_scale == OperandScale::kQuadruple) {
+ index += kNumberOfWideBytecodeHandlers;
+ }
+ }
+ }
+ return Builtins::kFirstBytecodeHandler + index;
}
} // namespace
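
For readers following the new dispatch layout in BuiltinIndexFromBytecode: single-scale handlers occupy the first kNumberOfBytecodeHandlers builtin slots, wide handlers follow as one block, and extra-wide handlers come after that, with the uint8_t table giving each bytecode's offset into the wide block. A standalone sketch of the same arithmetic, using made-up constants (V8 generates the real tables from the bytecode list, so every number below is illustrative only):

  #include <cstdint>

  // All values are illustrative; the real constants are generated.
  constexpr int kNumberOfBytecodeHandlers = 180;
  constexpr int kNumberOfWideBytecodeHandlers = 110;
  constexpr uint8_t kIllegalBytecodeHandlerEncoding = 255;
  constexpr int kFirstBytecodeHandler = 900;  // builtin id of the first handler
  constexpr int kIllegalHandler = 42;         // builtin id of IllegalHandler

  enum class OperandScale { kSingle, kDouble, kQuadruple };

  int BuiltinIndex(int bytecode, OperandScale scale,
                   const uint8_t* wide_offsets /* one entry per bytecode */) {
    int index = bytecode;
    if (scale != OperandScale::kSingle) {
      uint8_t offset = wide_offsets[index];
      if (offset == kIllegalBytecodeHandlerEncoding) return kIllegalHandler;
      index = kNumberOfBytecodeHandlers + offset;  // start of the wide block
      if (scale == OperandScale::kQuadruple) {
        index += kNumberOfWideBytecodeHandlers;    // skip into the extra-wide block
      }
    }
    return kFirstBytecodeHandler + index;
  }
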
@@ -250,7 +260,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::DoFinalizeJobImpl(
SourcePositionTableBuilder::RecordingMode::RECORD_SOURCE_POSITIONS) {
Handle<ByteArray> source_position_table =
generator()->FinalizeSourcePositionTable(isolate);
- bytecodes->set_synchronized_source_position_table(*source_position_table);
+ bytecodes->set_source_position_table(*source_position_table, kReleaseStore);
}
if (ShouldPrintBytecode(shared_info)) {
@@ -286,7 +296,7 @@ Interpreter::NewSourcePositionCollectionJob(
auto job = std::make_unique<InterpreterCompilationJob>(parse_info, literal,
allocator, nullptr);
job->compilation_info()->SetBytecodeArray(existing_bytecode);
- return std::unique_ptr<UnoptimizedCompilationJob> { static_cast<UnoptimizedCompilationJob*>(job.release()) };
+ return job;
}
void Interpreter::ForEachBytecode(
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 3ef28fdfbf..3bbd93fffb 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -37,6 +37,8 @@ class Interpreter {
public:
explicit Interpreter(Isolate* isolate);
virtual ~Interpreter() = default;
+ Interpreter(const Interpreter&) = delete;
+ Interpreter& operator=(const Interpreter&) = delete;
// Creates a compilation job which will generate bytecode for |literal|.
// Additionally, if |eager_inner_literals| is not null, adds any eagerly
@@ -105,8 +107,6 @@ class Interpreter {
Address dispatch_table_[kDispatchTableSize];
std::unique_ptr<uintptr_t[]> bytecode_dispatch_counters_table_;
Address interpreter_entry_trampoline_instruction_start_;
-
- DISALLOW_COPY_AND_ASSIGN(Interpreter);
};
} // namespace interpreter
diff --git a/deps/v8/src/json/DIR_METADATA b/deps/v8/src/json/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/json/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/json/OWNERS b/deps/v8/src/json/OWNERS
index 48b6dfd658..85599ae570 100644
--- a/deps/v8/src/json/OWNERS
+++ b/deps/v8/src/json/OWNERS
@@ -1,5 +1,3 @@
ishell@chromium.org
jkummerow@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/json/json-parser.cc b/deps/v8/src/json/json-parser.cc
index d099fa36cb..13d4d6c91d 100644
--- a/deps/v8/src/json/json-parser.cc
+++ b/deps/v8/src/json/json-parser.cc
@@ -273,8 +273,7 @@ void JsonParser<Char>::ReportUnexpectedToken(JsonToken token) {
// separated source file.
isolate()->debug()->OnCompileError(script);
MessageLocation location(script, pos, pos + 1);
- Handle<Object> error = factory->NewSyntaxError(message, arg1, arg2);
- isolate()->Throw(*error, &location);
+ isolate()->ThrowAt(factory->NewSyntaxError(message, arg1, arg2), &location);
// Move the cursor to the end so we won't be able to proceed parsing.
cursor_ = end_;
@@ -464,9 +463,10 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
Handle<Map> target;
InternalIndex descriptor_index(descriptor);
if (descriptor < feedback_descriptors) {
- expected = handle(String::cast(feedback->instance_descriptors().GetKey(
- descriptor_index)),
- isolate_);
+ expected =
+ handle(String::cast(feedback->instance_descriptors(kRelaxedLoad)
+ .GetKey(descriptor_index)),
+ isolate_);
} else {
DisallowHeapAllocation no_gc;
TransitionsAccessor transitions(isolate(), *map, &no_gc);
@@ -497,7 +497,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
Handle<Object> value = property.value;
PropertyDetails details =
- target->instance_descriptors().GetDetails(descriptor_index);
+ target->instance_descriptors(kRelaxedLoad).GetDetails(descriptor_index);
Representation expected_representation = details.representation();
if (!value->FitsRepresentation(expected_representation)) {
@@ -512,7 +512,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
Map::GeneralizeField(isolate(), target, descriptor_index,
details.constness(), representation, value_type);
} else if (expected_representation.IsHeapObject() &&
- !target->instance_descriptors()
+ !target->instance_descriptors(kRelaxedLoad)
.GetFieldType(descriptor_index)
.NowContains(value)) {
Handle<FieldType> value_type =
@@ -525,7 +525,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
new_mutable_double++;
}
- DCHECK(target->instance_descriptors()
+ DCHECK(target->instance_descriptors(kRelaxedLoad)
.GetFieldType(descriptor_index)
.NowContains(value));
map = target;
@@ -575,7 +575,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
if (property.string.is_index()) continue;
InternalIndex descriptor_index(descriptor);
PropertyDetails details =
- map->instance_descriptors().GetDetails(descriptor_index);
+ map->instance_descriptors(kRelaxedLoad).GetDetails(descriptor_index);
Object value = *property.value;
FieldIndex index = FieldIndex::ForDescriptor(*map, descriptor_index);
descriptor++;
diff --git a/deps/v8/src/json/json-stringifier.cc b/deps/v8/src/json/json-stringifier.cc
index 47d6a0ddad..0dabdd3082 100644
--- a/deps/v8/src/json/json-stringifier.cc
+++ b/deps/v8/src/json/json-stringifier.cc
@@ -772,11 +772,13 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject(
Indent();
bool comma = false;
for (InternalIndex i : map->IterateOwnDescriptors()) {
- Handle<Name> name(map->instance_descriptors().GetKey(i), isolate_);
+ Handle<Name> name(map->instance_descriptors(kRelaxedLoad).GetKey(i),
+ isolate_);
// TODO(rossberg): Should this throw?
if (!name->IsString()) continue;
Handle<String> key = Handle<String>::cast(name);
- PropertyDetails details = map->instance_descriptors().GetDetails(i);
+ PropertyDetails details =
+ map->instance_descriptors(kRelaxedLoad).GetDetails(i);
if (details.IsDontEnum()) continue;
Handle<Object> property;
if (details.location() == kField && *map == object->map()) {
diff --git a/deps/v8/src/libplatform/DIR_METADATA b/deps/v8/src/libplatform/DIR_METADATA
new file mode 100644
index 0000000000..a27ea1b53a
--- /dev/null
+++ b/deps/v8/src/libplatform/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>API"
+}
\ No newline at end of file
diff --git a/deps/v8/src/libplatform/OWNERS b/deps/v8/src/libplatform/OWNERS
index 7ec6602147..65e7ba445c 100644
--- a/deps/v8/src/libplatform/OWNERS
+++ b/deps/v8/src/libplatform/OWNERS
@@ -1,4 +1,2 @@
mlippautz@chromium.org
ulan@chromium.org
-
-# COMPONENT: Blink>JavaScript>API
diff --git a/deps/v8/src/libplatform/default-job.cc b/deps/v8/src/libplatform/default-job.cc
index 728c1f5b28..8a8064c24c 100644
--- a/deps/v8/src/libplatform/default-job.cc
+++ b/deps/v8/src/libplatform/default-job.cc
@@ -122,10 +122,15 @@ void DefaultJobState::CancelAndWait() {
}
}
-bool DefaultJobState::IsCompleted() {
+void DefaultJobState::CancelAndDetach() {
base::MutexGuard guard(&mutex_);
- return job_task_->GetMaxConcurrency(active_workers_) == 0 &&
- active_workers_ == 0;
+ is_canceled_.store(true, std::memory_order_relaxed);
+}
+
+bool DefaultJobState::IsActive() {
+ base::MutexGuard guard(&mutex_);
+ return job_task_->GetMaxConcurrency(active_workers_) != 0 ||
+ active_workers_ != 0;
}
bool DefaultJobState::CanRunFirstTask() {
@@ -204,6 +209,11 @@ void DefaultJobState::CallOnWorkerThread(TaskPriority priority,
}
}
+void DefaultJobState::UpdatePriority(TaskPriority priority) {
+ base::MutexGuard guard(&mutex_);
+ priority_ = priority;
+}
+
DefaultJobHandle::DefaultJobHandle(std::shared_ptr<DefaultJobState> state)
: state_(std::move(state)) {
state_->NotifyConcurrencyIncrease();
@@ -220,7 +230,16 @@ void DefaultJobHandle::Cancel() {
state_ = nullptr;
}
-bool DefaultJobHandle::IsCompleted() { return state_->IsCompleted(); }
+void DefaultJobHandle::CancelAndDetach() {
+ state_->CancelAndDetach();
+ state_ = nullptr;
+}
+
+bool DefaultJobHandle::IsActive() { return state_->IsActive(); }
+
+void DefaultJobHandle::UpdatePriority(TaskPriority priority) {
+ state_->UpdatePriority(priority);
+}
} // namespace platform
} // namespace v8
diff --git a/deps/v8/src/libplatform/default-job.h b/deps/v8/src/libplatform/default-job.h
index 15517f49ac..082fa1ef6f 100644
--- a/deps/v8/src/libplatform/default-job.h
+++ b/deps/v8/src/libplatform/default-job.h
@@ -54,7 +54,8 @@ class V8_PLATFORM_EXPORT DefaultJobState
void Join();
void CancelAndWait();
- bool IsCompleted();
+ void CancelAndDetach();
+ bool IsActive();
// Must be called before running |job_task_| for the first time. If it returns
// true, then the worker thread must contribute and must call DidRunTask(), or
@@ -64,6 +65,8 @@ class V8_PLATFORM_EXPORT DefaultJobState
// must contribute again, or false if it should return.
bool DidRunTask();
+ void UpdatePriority(TaskPriority);
+
private:
// Called from the joining thread. Waits for the worker count to be below or
// equal to max concurrency (will happen when a worker calls
@@ -109,8 +112,15 @@ class V8_PLATFORM_EXPORT DefaultJobHandle : public JobHandle {
void Join() override;
void Cancel() override;
- bool IsCompleted() override;
- bool IsRunning() override { return state_ != nullptr; }
+ void CancelAndDetach() override;
+ bool IsCompleted() override { return !IsActive(); }
+ bool IsActive() override;
+ bool IsRunning() override { return IsValid(); }
+ bool IsValid() override { return state_ != nullptr; }
+
+ bool UpdatePriorityEnabled() const override { return true; }
+
+ void UpdatePriority(TaskPriority) override;
private:
std::shared_ptr<DefaultJobState> state_;
@@ -127,9 +137,11 @@ class DefaultJobWorker : public Task {
void Run() override {
auto shared_state = state_.lock();
if (!shared_state) return;
- DefaultJobState::JobDelegate delegate(shared_state.get());
if (!shared_state->CanRunFirstTask()) return;
do {
+ // Scope of |delegate| must not outlive DidRunTask() so that associated
+ // state is freed before the worker becomes inactive.
+ DefaultJobState::JobDelegate delegate(shared_state.get());
job_task_->Run(&delegate);
} while (shared_state->DidRunTask());
}
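
The handle methods added here back the public v8::JobHandle interface, so their semantics are easiest to see from the embedder side: Cancel() waits for running workers, CancelAndDetach() only sets the cancellation flag and lets workers drain on their own, IsActive() reports whether work remains or workers are still running, and UpdatePriority() changes the priority used for subsequently posted worker tasks. A hedged usage sketch follows; the work queue is invented, while the v8::JobTask/JobHandle calls are the public API this file implements as of this V8 version:

  #include <atomic>
  #include <memory>
  #include "v8-platform.h"  // include path depends on the embedder setup

  class DrainQueueTask final : public v8::JobTask {
   public:
    void Run(v8::JobDelegate* delegate) override {
      while (!delegate->ShouldYield()) {
        size_t before = remaining_.load(std::memory_order_relaxed);
        if (before == 0) return;
        if (remaining_.compare_exchange_weak(before, before - 1,
                                             std::memory_order_relaxed)) {
          // ... process one invented work item ...
        }
      }
    }
    size_t GetMaxConcurrency(size_t /*worker_count*/) const override {
      return remaining_.load(std::memory_order_relaxed);
    }

   private:
    std::atomic<size_t> remaining_{1000};
  };

  void PostAndManage(v8::Platform* platform) {
    std::unique_ptr<v8::JobHandle> handle = platform->PostJob(
        v8::TaskPriority::kUserVisible, std::make_unique<DrainQueueTask>());
    if (handle->UpdatePriorityEnabled()) {
      handle->UpdatePriority(v8::TaskPriority::kBestEffort);  // demote later
    }
    if (handle->IsActive()) {
      handle->CancelAndDetach();  // flag cancellation, don't wait for workers
    }
  }

The task object is owned by the job state until the last worker finishes, which is what makes CancelAndDetach() safe even though the handle is released immediately.
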
diff --git a/deps/v8/src/libsampler/DIR_METADATA b/deps/v8/src/libsampler/DIR_METADATA
new file mode 100644
index 0000000000..3ba1106a5f
--- /dev/null
+++ b/deps/v8/src/libsampler/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Platform>DevTools>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/libsampler/OWNERS b/deps/v8/src/libsampler/OWNERS
index 7ab7c063da..6afd4d0fee 100644
--- a/deps/v8/src/libsampler/OWNERS
+++ b/deps/v8/src/libsampler/OWNERS
@@ -1,4 +1,2 @@
alph@chromium.org
petermarshall@chromium.org
-
-# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 9631d2f478..1dac546262 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -329,9 +329,9 @@ class SignalHandler {
sa.sa_sigaction = &HandleProfilerSignal;
sigemptyset(&sa.sa_mask);
#if V8_OS_QNX
- sa.sa_flags = SA_SIGINFO;
+ sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
#else
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ sa.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK;
#endif
signal_handler_installed_ =
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
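
SA_ONSTACK only takes effect on threads that have registered an alternate signal stack via sigaltstack(); with it set, the SIGPROF handler above runs on that stack instead of the interrupted thread's own, which keeps profiling ticks usable near stack-exhaustion conditions. For context, a minimal V8-independent POSIX sketch of the per-thread setup the flag relies on:

  #include <csignal>
  #include <cstdlib>

  // Register a per-thread alternate signal stack. With SA_ONSTACK set on the
  // handler, the kernel switches to this stack before running it.
  bool InstallAlternateSignalStack() {
    stack_t ss;
    ss.ss_sp = std::malloc(SIGSTKSZ);
    if (ss.ss_sp == nullptr) return false;
    ss.ss_size = SIGSTKSZ;
    ss.ss_flags = 0;
    return sigaltstack(&ss, nullptr) == 0;
  }
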
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index 4623dc4b8a..9cb58dd9fc 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -16,9 +16,9 @@ namespace internal {
HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7) \
- HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 22, 23) \
+ HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 25, 26) \
HR(incremental_marking_sum, V8.GCIncrementalMarkingSum, 0, 10000, 101) \
- HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 22, 23) \
+ HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 25, 26) \
HR(gc_finalize_clear, V8.GCFinalizeMC.Clear, 0, 10000, 101) \
HR(gc_finalize_epilogue, V8.GCFinalizeMC.Epilogue, 0, 10000, 101) \
HR(gc_finalize_evacuate, V8.GCFinalizeMC.Evacuate, 0, 10000, 101) \
@@ -33,7 +33,7 @@ namespace internal {
/* Range and bucket matches BlinkGC.MainThreadMarkingThroughput. */ \
HR(gc_main_thread_marking_throughput, V8.GCMainThreadMarkingThroughput, 0, \
100000, 50) \
- HR(scavenge_reason, V8.GCScavengeReason, 0, 22, 23) \
+ HR(scavenge_reason, V8.GCScavengeReason, 0, 25, 26) \
HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3) \
/* Asm/Wasm. */ \
HR(wasm_functions_per_asm_module, V8.WasmFunctionsPerModule.asm, 1, 1000000, \
@@ -138,9 +138,10 @@ namespace internal {
HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND) \
HT(gc_scavenger_background, V8.GCScavengerBackground, 10000, MILLISECOND) \
HT(gc_scavenger_foreground, V8.GCScavengerForeground, 10000, MILLISECOND) \
- HT(time_to_safepoint, V8.TimeToSafepoint, 10000, MILLISECOND) \
HT(measure_memory_delay_ms, V8.MeasureMemoryDelayMilliseconds, 100000, \
MILLISECOND) \
+ HT(stop_the_world, V8.StopTheWorld, 10000, MICROSECOND) \
+ HT(time_to_collection, V8.TimeToCollection, 10000, MICROSECOND) \
/* TurboFan timers. */ \
HT(turbofan_optimize_prepare, V8.TurboFanOptimizePrepare, 1000000, \
MICROSECOND) \
@@ -183,8 +184,6 @@ namespace internal {
1000000, MICROSECOND) \
HT(wasm_compile_wasm_function_time, V8.WasmCompileFunctionMicroSeconds.wasm, \
1000000, MICROSECOND) \
- HT(liftoff_compile_time, V8.LiftoffCompileMicroSeconds, 10000000, \
- MICROSECOND) \
HT(wasm_instantiate_wasm_module_time, \
V8.WasmInstantiateModuleMicroSeconds.wasm, 10000000, MICROSECOND) \
HT(wasm_instantiate_asm_module_time, \
diff --git a/deps/v8/src/logging/counters.cc b/deps/v8/src/logging/counters.cc
index 986848361e..c9c9aa0ebe 100644
--- a/deps/v8/src/logging/counters.cc
+++ b/deps/v8/src/logging/counters.cc
@@ -7,6 +7,7 @@
#include <iomanip>
#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
#include "src/builtins/builtins-definitions.h"
#include "src/execution/isolate.h"
#include "src/logging/counters-inl.h"
@@ -79,6 +80,15 @@ void* Histogram::CreateHistogram() const {
return counters_->CreateHistogram(name_, min_, max_, num_buckets_);
}
+void TimedHistogram::AddTimedSample(base::TimeDelta sample) {
+ if (Enabled()) {
+ int64_t sample_int = resolution_ == HistogramTimerResolution::MICROSECOND
+ ? sample.InMicroseconds()
+ : sample.InMilliseconds();
+ AddSample(static_cast<int>(sample_int));
+ }
+}
+
void TimedHistogram::Start(base::ElapsedTimer* timer, Isolate* isolate) {
if (Enabled()) timer->Start();
if (isolate) Logger::CallEventLogger(isolate, name(), Logger::START, true);
@@ -86,11 +96,9 @@ void TimedHistogram::Start(base::ElapsedTimer* timer, Isolate* isolate) {
void TimedHistogram::Stop(base::ElapsedTimer* timer, Isolate* isolate) {
if (Enabled()) {
- int64_t sample = resolution_ == HistogramTimerResolution::MICROSECOND
- ? timer->Elapsed().InMicroseconds()
- : timer->Elapsed().InMilliseconds();
+ base::TimeDelta delta = timer->Elapsed();
timer->Stop();
- AddSample(static_cast<int>(sample));
+ AddTimedSample(delta);
}
if (isolate != nullptr) {
Logger::CallEventLogger(isolate, name(), Logger::END, true);
@@ -601,6 +609,7 @@ WorkerThreadRuntimeCallStats::~WorkerThreadRuntimeCallStats() {
}
base::Thread::LocalStorageKey WorkerThreadRuntimeCallStats::GetKey() {
+ base::MutexGuard lock(&mutex_);
DCHECK(TracingFlags::is_runtime_stats_enabled());
if (!tls_key_) tls_key_ = base::Thread::CreateThreadLocalKey();
return *tls_key_;
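
The added MutexGuard closes a lazy-initialization race: two worker threads could both observe an unset tls_key_ and each create a thread-local key, leaking one and splitting the recorded stats. A minimal sketch of the guarded pattern, with illustrative names and the v8::base helpers assumed to be those used above:

  #include "src/base/optional.h"
  #include "src/base/platform/mutex.h"
  #include "src/base/platform/platform.h"  // base::Thread

  // Sketch of guarded lazy initialization; not part of the patch.
  class LazyTlsKey {
   public:
    v8::base::Thread::LocalStorageKey Get() {
      v8::base::MutexGuard lock(&mutex_);
      if (!key_.has_value()) key_ = v8::base::Thread::CreateThreadLocalKey();
      return key_.value();
    }

   private:
    v8::base::Mutex mutex_;
    v8::base::Optional<v8::base::Thread::LocalStorageKey> key_;
  };
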
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index b1b61aca0a..cb879e3c23 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -276,6 +276,9 @@ class TimedHistogram : public Histogram {
// that never got to run in a given scenario. Log if isolate non-null.
void RecordAbandon(base::ElapsedTimer* timer, Isolate* isolate);
+ // Add a single sample to this histogram.
+ void AddTimedSample(base::TimeDelta sample);
+
protected:
friend class Counters;
HistogramTimerResolution resolution_;
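
AddTimedSample lets callers record a duration they measured themselves rather than running Start()/Stop() on the same timer, which suits timers whose start and stop happen on different code paths or threads. A hedged sketch of a call site; the particular histogram accessor chosen here is only an example (the accessor itself is generated by the HT() macro on Counters):

  #include "src/execution/isolate.h"
  #include "src/logging/counters.h"

  // Sketch, not part of the patch: record an externally measured duration.
  void RecordPauseDuration(v8::internal::Isolate* isolate,
                           v8::base::TimeDelta measured_pause) {
    // Histograms declared with HT(name, ...) get a same-named accessor on
    // Counters; gc_scavenger is one of the millisecond-resolution timers.
    isolate->counters()->gc_scavenger()->AddTimedSample(measured_pause);
  }
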
@@ -736,6 +739,7 @@ class RuntimeCallTimer final {
V(Float64Array_New) \
V(Function_Call) \
V(Function_New) \
+ V(Function_FunctionProtoToString) \
V(Function_NewInstance) \
V(FunctionTemplate_GetFunction) \
V(FunctionTemplate_New) \
@@ -747,7 +751,6 @@ class RuntimeCallTimer final {
V(Int8Array_New) \
V(Isolate_DateTimeConfigurationChangeNotification) \
V(Isolate_LocaleConfigurationChangeNotification) \
- V(JSMemberBase_New) \
V(JSON_Parse) \
V(JSON_Stringify) \
V(Map_AsArray) \
@@ -788,6 +791,7 @@ class RuntimeCallTimer final {
V(Object_HasRealIndexedProperty) \
V(Object_HasRealNamedCallbackProperty) \
V(Object_HasRealNamedProperty) \
+ V(Object_IsCodeLike) \
V(Object_New) \
V(Object_ObjectProtoToString) \
V(Object_Set) \
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index efd7c2b5f3..61909ae967 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -78,11 +78,13 @@ static v8::CodeEventType GetCodeEventTypeForTag(
}
static const char* ComputeMarker(SharedFunctionInfo shared, AbstractCode code) {
+ // TODO(mythria,jgruber): Use different markers for Turboprop/NCI.
switch (code.kind()) {
case CodeKind::INTERPRETED_FUNCTION:
return shared.optimization_disabled() ? "" : "~";
- case CodeKind::OPTIMIZED_FUNCTION:
+ case CodeKind::TURBOFAN:
case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
+ case CodeKind::TURBOPROP:
return "*";
default:
return "";
@@ -991,6 +993,13 @@ Logger::~Logger() = default;
const LogSeparator Logger::kNext = LogSeparator::kSeparator;
+int64_t Logger::Time() {
+ if (V8_UNLIKELY(FLAG_verify_predictable)) {
+ return isolate_->heap()->MonotonicallyIncreasingTimeInMs() * 1000;
+ }
+ return timer_.Elapsed().InMicroseconds();
+}
+
void Logger::AddCodeEventListener(CodeEventListener* listener) {
bool result = isolate_->code_event_dispatcher()->AddListener(listener);
CHECK(result);
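
The new Time() helper becomes the single timestamp source for the log writers below, so --verify-predictable swaps wall-clock microseconds for the heap's deterministic clock in every log line. A tiny sketch of the pattern, detached from the Logger class and not itself V8 API:

  #include <cstdint>

  // Single switch point for log timestamps: deterministic under
  // --verify-predictable, elapsed wall-clock microseconds otherwise.
  int64_t LogTimestampMicros(bool predictable, double deterministic_time_ms,
                             int64_t elapsed_micros) {
    return predictable ? static_cast<int64_t>(deterministic_time_ms * 1000)
                       : elapsed_micros;
  }
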
@@ -1069,7 +1078,7 @@ void Logger::CurrentTimeEvent() {
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- msg << "current-time" << kNext << timer_.Elapsed().InMicroseconds();
+ msg << "current-time" << kNext << Time();
msg.WriteToLogFile();
}
@@ -1087,7 +1096,7 @@ void Logger::TimerEvent(Logger::StartEnd se, const char* name) {
case STAMP:
msg << "timer-event";
}
- msg << kNext << name << kNext << timer_.Elapsed().InMicroseconds();
+ msg << kNext << name << kNext << Time();
msg.WriteToLogFile();
}
@@ -1190,22 +1199,20 @@ namespace {
void AppendCodeCreateHeader(
Log::MessageBuilder& msg, // NOLINT(runtime/references)
CodeEventListener::LogEventsAndTags tag, CodeKind kind, uint8_t* address,
- int size, base::ElapsedTimer* timer) {
+ int size, uint64_t time) {
msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT]
<< Logger::kNext << kLogEventsNames[tag] << Logger::kNext
- << static_cast<int>(kind) << Logger::kNext
- << timer->Elapsed().InMicroseconds() << Logger::kNext
+ << static_cast<int>(kind) << Logger::kNext << time << Logger::kNext
<< reinterpret_cast<void*>(address) << Logger::kNext << size
<< Logger::kNext;
}
void AppendCodeCreateHeader(
Log::MessageBuilder& msg, // NOLINT(runtime/references)
- CodeEventListener::LogEventsAndTags tag, AbstractCode code,
- base::ElapsedTimer* timer) {
+ CodeEventListener::LogEventsAndTags tag, AbstractCode code, uint64_t time) {
AppendCodeCreateHeader(msg, tag, code.kind(),
reinterpret_cast<uint8_t*>(code.InstructionStart()),
- code.InstructionSize(), timer);
+ code.InstructionSize(), time);
}
} // namespace
@@ -1217,7 +1224,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- AppendCodeCreateHeader(msg, tag, *code, &timer_);
+ AppendCodeCreateHeader(msg, tag, *code, Time());
msg << name;
msg.WriteToLogFile();
}
@@ -1229,7 +1236,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- AppendCodeCreateHeader(msg, tag, *code, &timer_);
+ AppendCodeCreateHeader(msg, tag, *code, Time());
msg << *name;
msg.WriteToLogFile();
}
@@ -1247,7 +1254,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- AppendCodeCreateHeader(msg, tag, *code, &timer_);
+ AppendCodeCreateHeader(msg, tag, *code, Time());
msg << *script_name << kNext << reinterpret_cast<void*>(shared->address())
<< kNext << ComputeMarker(*shared, *code);
msg.WriteToLogFile();
@@ -1265,7 +1272,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- AppendCodeCreateHeader(msg, tag, *code, &timer_);
+ AppendCodeCreateHeader(msg, tag, *code, Time());
msg << shared->DebugName() << " " << *script_name << ":" << line << ":"
<< column << kNext << reinterpret_cast<void*>(shared->address())
<< kNext << ComputeMarker(*shared, *code);
@@ -1365,7 +1372,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
Log::MessageBuilder& msg = *msg_ptr.get();
AppendCodeCreateHeader(msg, tag, CodeKind::WASM_FUNCTION,
code->instructions().begin(),
- code->instructions().length(), &timer_);
+ code->instructions().length(), Time());
DCHECK(!name.empty());
msg.AppendString(name);
@@ -1388,9 +1395,8 @@ void Logger::CallbackEventInternal(const char* prefix, Handle<Name> name,
Log::MessageBuilder& msg = *msg_ptr.get();
msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT] << kNext
<< kLogEventsNames[CodeEventListener::CALLBACK_TAG] << kNext << -2
- << kNext << timer_.Elapsed().InMicroseconds() << kNext
- << reinterpret_cast<void*>(entry_point) << kNext << 1 << kNext << prefix
- << *name;
+ << kNext << Time() << kNext << reinterpret_cast<void*>(entry_point)
+ << kNext << 1 << kNext << prefix << *name;
msg.WriteToLogFile();
}
@@ -1413,7 +1419,7 @@ void Logger::RegExpCodeCreateEvent(Handle<AbstractCode> code,
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- AppendCodeCreateHeader(msg, CodeEventListener::REG_EXP_TAG, *code, &timer_);
+ AppendCodeCreateHeader(msg, CodeEventListener::REG_EXP_TAG, *code, Time());
msg << *source;
msg.WriteToLogFile();
}
@@ -1453,8 +1459,7 @@ void Logger::ProcessDeoptEvent(Handle<Code> code, SourcePosition position,
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- msg << "code-deopt" << kNext << timer_.Elapsed().InMicroseconds() << kNext
- << code->CodeSize() << kNext
+ msg << "code-deopt" << kNext << Time() << kNext << code->CodeSize() << kNext
<< reinterpret_cast<void*>(code->InstructionStart());
std::ostringstream deopt_location;
@@ -1578,11 +1583,16 @@ namespace {
void AppendFunctionMessage(
Log::MessageBuilder& msg, // NOLINT(runtime/references)
const char* reason, int script_id, double time_delta, int start_position,
- int end_position, base::ElapsedTimer* timer) {
+ int end_position, uint64_t time) {
msg << "function" << Logger::kNext << reason << Logger::kNext << script_id
<< Logger::kNext << start_position << Logger::kNext << end_position
- << Logger::kNext << time_delta << Logger::kNext
- << timer->Elapsed().InMicroseconds() << Logger::kNext;
+ << Logger::kNext;
+ if (V8_UNLIKELY(FLAG_predictable)) {
+ msg << 0.1;
+ } else {
+ msg << time_delta;
+ }
+ msg << Logger::kNext << time << Logger::kNext;
}
} // namespace
@@ -1594,7 +1604,7 @@ void Logger::FunctionEvent(const char* reason, int script_id, double time_delta,
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
AppendFunctionMessage(msg, reason, script_id, time_delta, start_position,
- end_position, &timer_);
+ end_position, Time());
if (!function_name.is_null()) msg << function_name;
msg.WriteToLogFile();
}
@@ -1608,7 +1618,7 @@ void Logger::FunctionEvent(const char* reason, int script_id, double time_delta,
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
AppendFunctionMessage(msg, reason, script_id, time_delta, start_position,
- end_position, &timer_);
+ end_position, Time());
if (function_name_length > 0) {
msg.AppendString(function_name, function_name_length, is_one_byte);
}
@@ -1628,7 +1638,7 @@ void Logger::CompilationCacheEvent(const char* action, const char* cache_type,
msg << "compilation-cache" << Logger::kNext << action << Logger::kNext
<< cache_type << Logger::kNext << script_id << Logger::kNext
<< sfi.StartPosition() << Logger::kNext << sfi.EndPosition()
- << Logger::kNext << timer_.Elapsed().InMicroseconds();
+ << Logger::kNext << Time();
msg.WriteToLogFile();
}
@@ -1655,8 +1665,7 @@ void Logger::ScriptEvent(ScriptEventType type, int script_id) {
msg << "streaming-compile";
break;
}
- msg << Logger::kNext << script_id << Logger::kNext
- << timer_.Elapsed().InMicroseconds();
+ msg << Logger::kNext << script_id << Logger::kNext << Time();
msg.WriteToLogFile();
}
@@ -1730,8 +1739,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
msg << kLogEventsNames[CodeEventListener::TICK_EVENT] << kNext
- << reinterpret_cast<void*>(sample->pc) << kNext
- << timer_.Elapsed().InMicroseconds();
+ << reinterpret_cast<void*>(sample->pc) << kNext << Time();
if (sample->has_external_callback) {
msg << kNext << 1 << kNext
<< reinterpret_cast<void*>(sample->external_callback_entry);
@@ -1757,9 +1765,9 @@ void Logger::ICEvent(const char* type, bool keyed, Handle<Map> map,
int line;
int column;
Address pc = isolate_->GetAbstractPC(&line, &column);
- msg << type << kNext << reinterpret_cast<void*>(pc) << kNext
- << timer_.Elapsed().InMicroseconds() << kNext << line << kNext << column
- << kNext << old_state << kNext << new_state << kNext
+ msg << type << kNext << reinterpret_cast<void*>(pc) << kNext << Time()
+ << kNext << line << kNext << column << kNext << old_state << kNext
+ << new_state << kNext
<< AsHex::Address(map.is_null() ? kNullAddress : map->ptr()) << kNext;
if (key->IsSmi()) {
msg << Smi::ToInt(*key);
@@ -1789,11 +1797,11 @@ void Logger::MapEvent(const char* type, Handle<Map> from, Handle<Map> to,
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- msg << "map" << kNext << type << kNext << timer_.Elapsed().InMicroseconds()
- << kNext << AsHex::Address(from.is_null() ? kNullAddress : from->ptr())
- << kNext << AsHex::Address(to.is_null() ? kNullAddress : to->ptr())
- << kNext << AsHex::Address(pc) << kNext << line << kNext << column
- << kNext << reason << kNext;
+ msg << "map" << kNext << type << kNext << Time() << kNext
+ << AsHex::Address(from.is_null() ? kNullAddress : from->ptr()) << kNext
+ << AsHex::Address(to.is_null() ? kNullAddress : to->ptr()) << kNext
+ << AsHex::Address(pc) << kNext << line << kNext << column << kNext
+ << reason << kNext;
if (!name_or_sfi.is_null()) {
if (name_or_sfi->IsName()) {
@@ -1815,8 +1823,7 @@ void Logger::MapCreate(Map map) {
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- msg << "map-create" << kNext << timer_.Elapsed().InMicroseconds() << kNext
- << AsHex::Address(map.ptr());
+ msg << "map-create" << kNext << Time() << kNext << AsHex::Address(map.ptr());
msg.WriteToLogFile();
}
@@ -1826,8 +1833,8 @@ void Logger::MapDetails(Map map) {
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- msg << "map-details" << kNext << timer_.Elapsed().InMicroseconds() << kNext
- << AsHex::Address(map.ptr()) << kNext;
+ msg << "map-details" << kNext << Time() << kNext << AsHex::Address(map.ptr())
+ << kNext;
if (FLAG_trace_maps_details) {
std::ostringstream buffer;
map.PrintMapDetails(buffer);
@@ -2061,22 +2068,17 @@ bool Logger::SetUp(Isolate* isolate) {
ticker_ = std::make_unique<Ticker>(isolate, FLAG_prof_sampling_interval);
- bool activate_logging = false;
-
- if (Log::InitLogAtStart()) activate_logging = true;
+ if (Log::InitLogAtStart()) UpdateIsLogging(true);
timer_.Start();
if (FLAG_prof_cpp) {
+ UpdateIsLogging(true);
profiler_ = std::make_unique<Profiler>(isolate);
- activate_logging = true;
profiler_->Engage();
}
- if (activate_logging) {
- AddCodeEventListener(this);
- UpdateIsLogging(true);
- }
+ if (is_logging_) AddCodeEventListener(this);
return true;
}
@@ -2162,12 +2164,13 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
const char* description = "Unknown code from before profiling";
switch (abstract_code->kind()) {
case CodeKind::INTERPRETED_FUNCTION:
- case CodeKind::OPTIMIZED_FUNCTION:
+ case CodeKind::TURBOFAN:
case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
+ case CodeKind::TURBOPROP:
return; // We log this later using LogCompiledFunctions.
case CodeKind::BYTECODE_HANDLER:
return; // We log it later by walking the dispatch table.
- case CodeKind::STUB:
+ case CodeKind::FOR_TESTING:
description = "STUB code";
tag = CodeEventListener::STUB_TAG;
break;
@@ -2237,7 +2240,7 @@ void ExistingCodeLogger::LogCompiledFunctions() {
// GetScriptLineNumber call.
for (int i = 0; i < compiled_funcs_count; ++i) {
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, sfis[i]);
- if (sfis[i]->function_data().IsInterpreterData()) {
+ if (sfis[i]->function_data(kAcquireLoad).IsInterpreterData()) {
LogExistingFunction(
sfis[i],
Handle<AbstractCode>(
@@ -2287,7 +2290,7 @@ void ExistingCodeLogger::LogExistingFunction(
} else if (shared->IsApiFunction()) {
// API function.
FunctionTemplateInfo fun_data = shared->get_api_func_data();
- Object raw_call_data = fun_data.call_code();
+ Object raw_call_data = fun_data.call_code(kAcquireLoad);
if (!raw_call_data.IsUndefined(isolate_)) {
CallHandlerInfo call_data = CallHandlerInfo::cast(raw_call_data);
Object callback_obj = call_data.callback();
diff --git a/deps/v8/src/logging/log.h b/deps/v8/src/logging/log.h
index aa9b0c4237..303fbd236a 100644
--- a/deps/v8/src/logging/log.h
+++ b/deps/v8/src/logging/log.h
@@ -314,6 +314,8 @@ class Logger : public CodeEventListener {
// each script is logged only once.
bool EnsureLogScriptSource(Script script);
+ int64_t Time();
+
Isolate* isolate_;
// The sampler used by the profiler and the sliding state window.
diff --git a/deps/v8/src/logging/metrics.h b/deps/v8/src/logging/metrics.h
index 615b8b5498..0b59deb1fd 100644
--- a/deps/v8/src/logging/metrics.h
+++ b/deps/v8/src/logging/metrics.h
@@ -85,9 +85,7 @@ template <class T, int64_t (base::TimeDelta::*precision)() const =
&base::TimeDelta::InMicroseconds>
class TimedScope {
public:
- TimedScope(T* event, int64_t T::*time) : event_(event), time_(time) {
- Start();
- }
+ explicit TimedScope(T* event) : event_(event) { Start(); }
~TimedScope() { Stop(); }
void Start() { start_time_ = base::TimeTicks::Now(); }
@@ -95,13 +93,12 @@ class TimedScope {
void Stop() {
if (start_time_.IsMin()) return;
base::TimeDelta duration = base::TimeTicks::Now() - start_time_;
- event_->*time_ = (duration.*precision)();
+ event_->wall_clock_duration_in_us = (duration.*precision)();
start_time_ = base::TimeTicks::Min();
}
private:
T* event_;
- int64_t T::*time_;
base::TimeTicks start_time_;
};
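
With the member-pointer template and constructor argument gone, any event type used with TimedScope must expose a field named exactly wall_clock_duration_in_us. A hypothetical event type makes that contract visible; the struct below is invented for illustration, and the snippet assumes the namespace of metrics.h is in scope:

  // Hypothetical event type, for illustration only.
  struct IllustrativeEvent {
    int64_t wall_clock_duration_in_us = -1;
  };

  void MeasureSomething() {
    IllustrativeEvent event;
    {
      TimedScope<IllustrativeEvent> timed(&event);
      // ... the work being measured ...
    }  // Destructor calls Stop(), filling event.wall_clock_duration_in_us.
    // The event is now ready to be handed to the metrics recorder.
  }
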
diff --git a/deps/v8/src/numbers/DIR_METADATA b/deps/v8/src/numbers/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/numbers/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/numbers/OWNERS b/deps/v8/src/numbers/OWNERS
index 882d275fe8..c4022e3ada 100644
--- a/deps/v8/src/numbers/OWNERS
+++ b/deps/v8/src/numbers/OWNERS
@@ -3,5 +3,3 @@ jgruber@chromium.org
jkummerow@chromium.org
sigurds@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/objects/DIR_METADATA b/deps/v8/src/objects/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/objects/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/objects/OWNERS b/deps/v8/src/objects/OWNERS
index f52e1c9ca8..48d72aea5e 100644
--- a/deps/v8/src/objects/OWNERS
+++ b/deps/v8/src/objects/OWNERS
@@ -1,3 +1 @@
file:../../COMMON_OWNERS
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/objects/all-objects-inl.h b/deps/v8/src/objects/all-objects-inl.h
new file mode 100644
index 0000000000..6e7c7a59ce
--- /dev/null
+++ b/deps/v8/src/objects/all-objects-inl.h
@@ -0,0 +1,104 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_ALL_OBJECTS_INL_H_
+#define V8_OBJECTS_ALL_OBJECTS_INL_H_
+
+// This file includes all inline headers from src/objects, which is handy for
+// compilation units that need it like object printing or verification.
+// New inline headers should be added here.
+
+#include "src/objects/allocation-site-inl.h"
+#include "src/objects/allocation-site-scopes-inl.h"
+#include "src/objects/api-callbacks-inl.h"
+#include "src/objects/arguments-inl.h"
+#include "src/objects/bigint-inl.h"
+#include "src/objects/cell-inl.h"
+#include "src/objects/code-inl.h"
+#include "src/objects/compilation-cache-table-inl.h"
+#include "src/objects/compressed-slots-inl.h"
+#include "src/objects/contexts-inl.h"
+#include "src/objects/data-handler-inl.h"
+#include "src/objects/debug-objects-inl.h"
+#include "src/objects/descriptor-array-inl.h"
+#include "src/objects/dictionary-inl.h"
+#include "src/objects/elements-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
+#include "src/objects/embedder-data-slot-inl.h"
+#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/feedback-vector-inl.h"
+#include "src/objects/field-index-inl.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/foreign-inl.h"
+#include "src/objects/frame-array-inl.h"
+#include "src/objects/free-space-inl.h"
+#include "src/objects/hash-table-inl.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/heap-object-inl.h"
+#include "src/objects/instance-type-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-function-inl.h"
+#include "src/objects/js-generator-inl.h"
+#include "src/objects/js-objects-inl.h"
+#include "src/objects/js-promise-inl.h"
+#include "src/objects/js-proxy-inl.h"
+#include "src/objects/js-regexp-inl.h"
+#include "src/objects/js-regexp-string-iterator-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/layout-descriptor-inl.h"
+#include "src/objects/literal-objects-inl.h"
+#include "src/objects/lookup-cache-inl.h"
+#include "src/objects/lookup-inl.h"
+#include "src/objects/map-inl.h"
+#include "src/objects/maybe-object-inl.h"
+#include "src/objects/microtask-inl.h"
+#include "src/objects/module-inl.h"
+#include "src/objects/name-inl.h"
+#include "src/objects/objects-body-descriptors-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/oddball-inl.h"
+#include "src/objects/ordered-hash-table-inl.h"
+#include "src/objects/osr-optimized-code-cache-inl.h"
+#include "src/objects/primitive-heap-object-inl.h"
+#include "src/objects/promise-inl.h"
+#include "src/objects/property-array-inl.h"
+#include "src/objects/property-cell-inl.h"
+#include "src/objects/property-descriptor-object-inl.h"
+#include "src/objects/prototype-info-inl.h"
+#include "src/objects/script-inl.h"
+#include "src/objects/shared-function-info-inl.h"
+#include "src/objects/slots-atomic-inl.h"
+#include "src/objects/slots-inl.h"
+#include "src/objects/stack-frame-info-inl.h"
+#include "src/objects/string-inl.h"
+#include "src/objects/string-set-inl.h"
+#include "src/objects/string-table-inl.h"
+#include "src/objects/struct-inl.h"
+#include "src/objects/synthetic-module-inl.h"
+#include "src/objects/tagged-field-inl.h"
+#include "src/objects/tagged-impl-inl.h"
+#include "src/objects/tagged-value-inl.h"
+#include "src/objects/template-objects-inl.h"
+#include "src/objects/templates-inl.h"
+#include "src/objects/torque-defined-classes-inl.h"
+#include "src/objects/transitions-inl.h"
+
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-break-iterator-inl.h"
+#include "src/objects/js-collator-inl.h"
+#include "src/objects/js-date-time-format-inl.h"
+#include "src/objects/js-display-names-inl.h"
+#include "src/objects/js-list-format-inl.h"
+#include "src/objects/js-locale-inl.h"
+#include "src/objects/js-number-format-inl.h"
+#include "src/objects/js-plural-rules-inl.h"
+#include "src/objects/js-relative-time-format-inl.h"
+#include "src/objects/js-segment-iterator-inl.h"
+#include "src/objects/js-segmenter-inl.h"
+#include "src/objects/js-segments-inl.h"
+#endif // V8_INTL_SUPPORT
+
+#endif // V8_OBJECTS_ALL_OBJECTS_INL_H_
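
As a rough illustration of the intent behind all-objects-inl.h, a compilation unit that touches many object kinds (object printing, verification, and the like) can pull in every inline header with a single include instead of listing them individually. The translation unit below is only a sketch and is not part of this commit; the helper name is invented.

// Hypothetical translation unit (not part of this commit) that needs the
// inline definitions of many object kinds at once, e.g. a printing or
// verification helper.
#include <ostream>

#include "src/objects/all-objects-inl.h"

namespace v8 {
namespace internal {

// Example-only helper: with every -inl.h header visible, inline accessors of
// any object kind declared under src/objects can be used from this file.
void PrintObjectAddress(HeapObject object, std::ostream& os) {
  os << reinterpret_cast<void*>(object.address()) << "\n";
}

}  // namespace internal
}  // namespace v8
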
diff --git a/deps/v8/src/objects/allocation-site-inl.h b/deps/v8/src/objects/allocation-site-inl.h
index 7447eb5ec3..d9911bc826 100644
--- a/deps/v8/src/objects/allocation-site-inl.h
+++ b/deps/v8/src/objects/allocation-site-inl.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/allocation-site-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(AllocationMemento)
OBJECT_CONSTRUCTORS_IMPL(AllocationSite, Struct)
diff --git a/deps/v8/src/objects/allocation-site.h b/deps/v8/src/objects/allocation-site.h
index 1da5925bee..437876d94c 100644
--- a/deps/v8/src/objects/allocation-site.h
+++ b/deps/v8/src/objects/allocation-site.h
@@ -16,6 +16,8 @@ namespace internal {
enum InstanceType : uint16_t;
+#include "torque-generated/src/objects/allocation-site-tq.inc"
+
class AllocationSite : public Struct {
public:
NEVER_READ_ONLY_SPACE
diff --git a/deps/v8/src/objects/api-callbacks-inl.h b/deps/v8/src/objects/api-callbacks-inl.h
index 8fa0f40c73..1572a3b352 100644
--- a/deps/v8/src/objects/api-callbacks-inl.h
+++ b/deps/v8/src/objects/api-callbacks-inl.h
@@ -13,7 +13,6 @@
#include "src/objects/js-objects-inl.h"
#include "src/objects/name.h"
#include "src/objects/templates.h"
-#include "torque-generated/class-definitions-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -21,6 +20,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/api-callbacks-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(AccessCheckInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(AccessorInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(InterceptorInfo)
diff --git a/deps/v8/src/objects/api-callbacks.h b/deps/v8/src/objects/api-callbacks.h
index ffd9e9f02e..f5d81dd986 100644
--- a/deps/v8/src/objects/api-callbacks.h
+++ b/deps/v8/src/objects/api-callbacks.h
@@ -7,7 +7,6 @@
#include "src/objects/struct.h"
#include "torque-generated/bit-fields.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/api-callbacks-tq.inc"
+
// An accessor must have a getter, but can have no setter.
//
// When setting a property, V8 searches accessors in prototypes.
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
index 494a8960bd..b2576a6c8b 100644
--- a/deps/v8/src/objects/arguments-inl.h
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/arguments-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSArgumentsObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(AliasedArgumentsEntry)
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index d8cbdbae50..372fc745e4 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/arguments-tq.inc"
+
// Superclass for all objects with instance type {JS_ARGUMENTS_OBJECT_TYPE}
class JSArgumentsObject
: public TorqueGeneratedJSArgumentsObject<JSArgumentsObject, JSObject> {
diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc
index c67fff0fa9..9f755f5d04 100644
--- a/deps/v8/src/objects/backing-store.cc
+++ b/deps/v8/src/objects/backing-store.cc
@@ -9,6 +9,7 @@
#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
#include "src/logging/counters.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
@@ -23,12 +24,6 @@ namespace v8 {
namespace internal {
namespace {
-#if V8_TARGET_ARCH_64_BIT
-constexpr bool kUseGuardRegions = true;
-#else
-constexpr bool kUseGuardRegions = false;
-#endif
-
#if V8_TARGET_ARCH_MIPS64
// MIPS64 has a user space of 2^40 bytes on most processors, so the
// address space limit needs to be smaller.
@@ -39,11 +34,10 @@ constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB
#endif
-constexpr uint64_t kOneGiB = 1024 * 1024 * 1024;
-constexpr uint64_t kNegativeGuardSize = 2 * kOneGiB;
+constexpr uint64_t kNegativeGuardSize = uint64_t{2} * GB;
#if V8_TARGET_ARCH_64_BIT
-constexpr uint64_t kFullGuardSize = 10 * kOneGiB;
+constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
#endif
std::atomic<uint64_t> reserved_address_space_{0};
@@ -62,30 +56,26 @@ enum class AllocationStatus {
kOtherFailure // Failed for an unknown reason
};
+base::AddressRegion GetReservedRegion(bool has_guard_regions,
+ void* buffer_start,
+ size_t byte_capacity) {
#if V8_TARGET_ARCH_64_BIT
-base::AddressRegion GetGuardedRegion(void* buffer_start, size_t byte_length) {
- // Guard regions always look like this:
- // |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx|
- // ^ buffer_start
- // ^ byte_length
- // ^ negative guard region ^ positive guard region
-
- Address start = reinterpret_cast<Address>(buffer_start);
- DCHECK_EQ(8, sizeof(size_t)); // only use on 64-bit
- DCHECK_EQ(0, start % AllocatePageSize());
- return base::AddressRegion(start - (2 * kOneGiB),
- static_cast<size_t>(kFullGuardSize));
-}
+ if (has_guard_regions) {
+ // Guard regions always look like this:
+ // |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx|
+ // ^ buffer_start
+ // ^ byte_length
+ // ^ negative guard region ^ positive guard region
+
+ Address start = reinterpret_cast<Address>(buffer_start);
+ DCHECK_EQ(8, sizeof(size_t)); // only use on 64-bit
+ DCHECK_EQ(0, start % AllocatePageSize());
+ return base::AddressRegion(start - kNegativeGuardSize,
+ static_cast<size_t>(kFullGuardSize));
+ }
#endif
-base::AddressRegion GetRegion(bool has_guard_regions, void* buffer_start,
- size_t byte_length, size_t byte_capacity) {
-#if V8_TARGET_ARCH_64_BIT
- if (has_guard_regions) return GetGuardedRegion(buffer_start, byte_length);
-#else
DCHECK(!has_guard_regions);
-#endif
-
return base::AddressRegion(reinterpret_cast<Address>(buffer_start),
byte_capacity);
}
@@ -173,8 +163,11 @@ BackingStore::~BackingStore() {
if (is_wasm_memory_) {
DCHECK(free_on_destruct_);
DCHECK(!custom_deleter_);
- TRACE_BS("BSw:free bs=%p mem=%p (length=%zu, capacity=%zu)\n", this,
- buffer_start_, byte_length(), byte_capacity_);
+ size_t reservation_size =
+ GetReservationSize(has_guard_regions_, byte_capacity_);
+ TRACE_BS(
+ "BSw:free bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n",
+ this, buffer_start_, byte_length(), byte_capacity_, reservation_size);
if (is_shared_) {
// Deallocate the list of attached memory objects.
SharedWasmMemoryData* shared_data = get_shared_wasm_memory_data();
@@ -183,22 +176,21 @@ BackingStore::~BackingStore() {
}
// Wasm memories are always allocated through the page allocator.
- auto region = GetRegion(has_guard_regions_, buffer_start_, byte_length_,
- byte_capacity_);
+ auto region =
+ GetReservedRegion(has_guard_regions_, buffer_start_, byte_capacity_);
bool pages_were_freed =
region.size() == 0 /* no need to free any pages */ ||
FreePages(GetPlatformPageAllocator(),
reinterpret_cast<void*>(region.begin()), region.size());
CHECK(pages_were_freed);
- BackingStore::ReleaseReservation(
- GetReservationSize(has_guard_regions_, byte_capacity_));
+ BackingStore::ReleaseReservation(reservation_size);
Clear();
return;
}
if (custom_deleter_) {
DCHECK(free_on_destruct_);
- TRACE_BS("BS:custome deleter bs=%p mem=%p (length=%zu, capacity=%zu)\n",
+ TRACE_BS("BS:custom deleter bs=%p mem=%p (length=%zu, capacity=%zu)\n",
this, buffer_start_, byte_length(), byte_capacity_);
type_specific_data_.deleter.callback(buffer_start_, byte_length_,
type_specific_data_.deleter.data);
@@ -304,7 +296,7 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
TRACE_BS("BSw:try %zu pages, %zu max\n", initial_pages, maximum_pages);
- bool guards = kUseGuardRegions;
+ bool guards = trap_handler::IsTrapHandlerEnabled();
// For accounting purposes, whether a GC was necessary.
bool did_retry = false;
@@ -348,7 +340,8 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
FATAL("could not allocate wasm memory backing store");
}
RecordStatus(isolate, AllocationStatus::kAddressSpaceLimitReachedFailure);
- TRACE_BS("BSw:try failed to reserve address space\n");
+ TRACE_BS("BSw:try failed to reserve address space (size %zu)\n",
+ reservation_size);
return {};
}
@@ -385,9 +378,10 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
PageAllocator::kReadWrite);
};
if (!gc_retry(commit_memory)) {
+ TRACE_BS("BSw:try failed to set permissions (%p, %zu)\n", buffer_start,
+ byte_length);
// SetPermissions put us over the process memory limit.
V8::FatalProcessOutOfMemory(nullptr, "BackingStore::AllocateWasmMemory()");
- TRACE_BS("BSw:try failed to set permissions\n");
}
DebugCheckZero(buffer_start, byte_length); // touch the bytes.
@@ -405,8 +399,10 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
false, // custom_deleter
false); // empty_deleter
- TRACE_BS("BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu)\n", result,
- result->buffer_start(), byte_length, byte_capacity);
+ TRACE_BS(
+ "BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n",
+ result, result->buffer_start(), byte_length, byte_capacity,
+ reservation_size);
// Shared Wasm memories need an anchor for the memory object list.
if (shared == SharedFlag::kShared) {
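
The guard-region arithmetic above can be checked in isolation. The following standalone sketch is not V8 code: the constants mirror the ones in the hunk, the buffer address is made up, and a 64-bit target with trap handlers (and hence guard regions) enabled is assumed. The reserved region starts 2 GiB before the buffer and spans the full 10 GiB (2 GiB negative guard + 4 GiB addressable + 4 GiB positive guard).

// Standalone sketch of GetReservedRegion()'s guard-region math (not V8 code).
#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint64_t kGiB = uint64_t{1} << 30;
  constexpr uint64_t kNegativeGuardSize = 2 * kGiB;  // guard before the buffer
  constexpr uint64_t kFullGuardSize = 10 * kGiB;     // 2 + 4 + 4 GiB in total

  // Made-up, page-aligned buffer start as a page allocator might return it.
  const uint64_t buffer_start = uint64_t{0x20} << 32;

  const uint64_t region_begin = buffer_start - kNegativeGuardSize;
  const uint64_t region_end = region_begin + kFullGuardSize;
  std::printf("reserved region: [0x%" PRIx64 ", 0x%" PRIx64 ")\n",
              region_begin, region_end);
  return 0;
}
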
diff --git a/deps/v8/src/objects/backing-store.h b/deps/v8/src/objects/backing-store.h
index 0a460cef8a..18505baf67 100644
--- a/deps/v8/src/objects/backing-store.h
+++ b/deps/v8/src/objects/backing-store.h
@@ -157,6 +157,8 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
globally_registered_(false),
custom_deleter_(custom_deleter),
empty_deleter_(empty_deleter) {}
+ BackingStore(const BackingStore&) = delete;
+ BackingStore& operator=(const BackingStore&) = delete;
void SetAllocatorFromIsolate(Isolate* isolate);
void* buffer_start_ = nullptr;
@@ -209,8 +211,6 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
static std::unique_ptr<BackingStore> TryAllocateWasmMemory(
Isolate* isolate, size_t initial_pages, size_t maximum_pages,
SharedFlag shared);
-
- DISALLOW_COPY_AND_ASSIGN(BackingStore);
};
// A global, per-process mapping from buffer addresses to backing stores.
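
The backing-store.h change above swaps the DISALLOW_COPY_AND_ASSIGN macro for explicitly deleted copy operations declared next to the constructors. A minimal sketch of the resulting idiom follows; the class name is illustrative only.

// Illustrative only: the copy constructor and copy assignment are deleted
// inline in the class body, so no helper macro is needed.
class NonCopyableBuffer {
 public:
  NonCopyableBuffer() = default;
  NonCopyableBuffer(const NonCopyableBuffer&) = delete;
  NonCopyableBuffer& operator=(const NonCopyableBuffer&) = delete;
};
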
diff --git a/deps/v8/src/objects/bigint-inl.h b/deps/v8/src/objects/bigint-inl.h
new file mode 100644
index 0000000000..1455bed92d
--- /dev/null
+++ b/deps/v8/src/objects/bigint-inl.h
@@ -0,0 +1,24 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_BIGINT_INL_H_
+#define V8_OBJECTS_BIGINT_INL_H_
+
+#include "src/objects/bigint.h"
+#include "src/objects/objects-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/bigint-tq-inl.inc"
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_BIGINT_INL_H_
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index fbbfbeb69d..129bb14b20 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -715,7 +715,7 @@ MaybeHandle<MutableBigInt> MutableBigInt::BitwiseAnd(Isolate* isolate,
if (!x->sign() && !y->sign()) {
return AbsoluteAnd(isolate, x, y);
} else if (x->sign() && y->sign()) {
- int result_length = Max(x->length(), y->length()) + 1;
+ int result_length = std::max(x->length(), y->length()) + 1;
// (-x) & (-y) == ~(x-1) & ~(y-1) == ~((x-1) | (y-1))
// == -(((x-1) | (y-1)) + 1)
Handle<MutableBigInt> result;
@@ -746,7 +746,7 @@ MaybeHandle<MutableBigInt> MutableBigInt::BitwiseXor(Isolate* isolate,
if (!x->sign() && !y->sign()) {
return AbsoluteXor(isolate, x, y);
} else if (x->sign() && y->sign()) {
- int result_length = Max(x->length(), y->length());
+ int result_length = std::max(x->length(), y->length());
// (-x) ^ (-y) == ~(x-1) ^ ~(y-1) == (x-1) ^ (y-1)
Handle<MutableBigInt> result =
AbsoluteSubOne(isolate, x, result_length).ToHandleChecked();
@@ -754,7 +754,7 @@ MaybeHandle<MutableBigInt> MutableBigInt::BitwiseXor(Isolate* isolate,
return AbsoluteXor(isolate, result, y_1, *result);
} else {
DCHECK(x->sign() != y->sign());
- int result_length = Max(x->length(), y->length()) + 1;
+ int result_length = std::max(x->length(), y->length()) + 1;
// Assume that x is the positive BigInt.
if (x->sign()) std::swap(x, y);
// x ^ (-y) == x ^ ~(y-1) == ~(x ^ (y-1)) == -((x ^ (y-1)) + 1)
@@ -775,7 +775,7 @@ MaybeHandle<BigInt> BigInt::BitwiseOr(Isolate* isolate, Handle<BigInt> x,
MaybeHandle<MutableBigInt> MutableBigInt::BitwiseOr(Isolate* isolate,
Handle<BigInt> x,
Handle<BigInt> y) {
- int result_length = Max(x->length(), y->length());
+ int result_length = std::max(x->length(), y->length());
if (!x->sign() && !y->sign()) {
return AbsoluteOr(isolate, x, y);
} else if (x->sign() && y->sign()) {
@@ -1371,7 +1371,7 @@ inline Handle<MutableBigInt> MutableBigInt::AbsoluteBitwiseOp(
std::swap(x_length, y_length);
}
}
- DCHECK(num_pairs == Min(x_length, y_length));
+ DCHECK(num_pairs == std::min(x_length, y_length));
Handle<MutableBigInt> result(result_storage, isolate);
int result_length = extra_digits == kCopy ? x_length : num_pairs;
if (result_storage.is_null()) {
@@ -1872,6 +1872,8 @@ Handle<BigInt> MutableBigInt::RightShiftByAbsolute(Isolate* isolate,
DCHECK_LE(result_length, length);
Handle<MutableBigInt> result = New(isolate, result_length).ToHandleChecked();
if (bits_shift == 0) {
+ // Zero out any overflow digit (see "rounding_can_overflow" above).
+ result->set_digit(result_length - 1, 0);
for (int i = digit_shift; i < length; i++) {
result->set_digit(i - digit_shift, x->digit(i));
}
@@ -2392,7 +2394,7 @@ Handle<BigInt> MutableBigInt::TruncateAndSubFromPowerOfTwo(Isolate* isolate,
int x_length = x->length();
digit_t borrow = 0;
// Take digits from {x} unless its length is exhausted.
- int limit = Min(last, x_length);
+ int limit = std::min(last, x_length);
for (; i < limit; i++) {
digit_t new_borrow = 0;
digit_t difference = digit_sub(0, x->digit(i), &new_borrow);
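
The bitwise BigInt paths above rely on the two's-complement identities spelled out in the comments, e.g. (-x) & (-y) == -(((x-1) | (y-1)) + 1); the trailing "+ 1" is what can spill into one extra digit, which is why the code sizes the result as max(x->length(), y->length()) + 1. A small self-contained check of that identity on machine integers (not V8 code) follows.

// Standalone check (not V8 code) of the two's-complement identity used for
// the negative & negative case: (-x) & (-y) == -(((x-1) | (y-1)) + 1).
#include <cassert>
#include <cstdint>

int main() {
  for (int64_t x = 1; x <= 256; ++x) {
    for (int64_t y = 1; y <= 256; ++y) {
      assert(((-x) & (-y)) == -(((x - 1) | (y - 1)) + 1));
    }
  }
  return 0;
}
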
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index 4fdd9b1501..a7494a54c5 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -26,6 +26,8 @@ class BigInt;
class ValueDeserializer;
class ValueSerializer;
+#include "torque-generated/src/objects/bigint-tq.inc"
+
// BigIntBase is just the raw data object underlying a BigInt. Use with care!
// Most code should be using BigInts instead.
class BigIntBase : public PrimitiveHeapObject {
diff --git a/deps/v8/src/objects/bigint.tq b/deps/v8/src/objects/bigint.tq
new file mode 100644
index 0000000000..60be844cc6
--- /dev/null
+++ b/deps/v8/src/objects/bigint.tq
@@ -0,0 +1,21 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(nicohartmann): Discuss whether types used by multiple builtins should be
+// in global namespace
+extern class BigIntBase extends PrimitiveHeapObject
+ generates 'TNode<BigInt>' {}
+
+type BigInt extends BigIntBase;
+
+@noVerifier
+@hasSameInstanceTypeAsParent
+@doNotGenerateCast
+extern class MutableBigInt extends BigIntBase generates 'TNode<BigInt>' {
+}
+
+Convert<BigInt, MutableBigInt>(i: MutableBigInt): BigInt {
+ assert(bigint::IsCanonicalized(i));
+ return %RawDownCast<BigInt>(Convert<BigIntBase>(i));
+}
diff --git a/deps/v8/src/objects/cell-inl.h b/deps/v8/src/objects/cell-inl.h
index 0bd6808fbc..dbfdb3ae98 100644
--- a/deps/v8/src/objects/cell-inl.h
+++ b/deps/v8/src/objects/cell-inl.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/cell-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(Cell)
Cell Cell::FromValueAddress(Address value) {
diff --git a/deps/v8/src/objects/cell.h b/deps/v8/src/objects/cell.h
index de43897350..da75249990 100644
--- a/deps/v8/src/objects/cell.h
+++ b/deps/v8/src/objects/cell.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_CELL_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -14,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/cell-tq.inc"
+
class Cell : public TorqueGeneratedCell<Cell, HeapObject> {
public:
static inline Cell FromValueAddress(Address value);
diff --git a/deps/v8/src/objects/class-definitions-tq-deps-inl.h b/deps/v8/src/objects/class-definitions-tq-deps-inl.h
deleted file mode 100644
index ad046cbf8b..0000000000
--- a/deps/v8/src/objects/class-definitions-tq-deps-inl.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_CLASS_DEFINITIONS_TQ_DEPS_INL_H_
-#define V8_OBJECTS_CLASS_DEFINITIONS_TQ_DEPS_INL_H_
-
-// This is a collection of -inl.h files required by the generated file
-// class-definitions.cc. Generally, classes using @generateCppClass need an
-// entry here.
-#include "src/objects/allocation-site-inl.h"
-#include "src/objects/arguments-inl.h"
-#include "src/objects/embedder-data-array-inl.h"
-#include "src/objects/free-space-inl.h"
-#include "src/objects/js-collection-inl.h"
-#include "src/objects/js-generator-inl.h"
-#include "src/objects/js-regexp-inl.h"
-#include "src/objects/js-regexp-string-iterator-inl.h"
-#include "src/objects/js-weak-refs-inl.h"
-#include "src/objects/literal-objects-inl.h"
-#include "src/objects/microtask-inl.h"
-#include "src/objects/module-inl.h"
-#include "src/objects/promise-inl.h"
-#include "src/objects/property-descriptor-object-inl.h"
-#include "src/objects/stack-frame-info-inl.h"
-#include "src/objects/struct-inl.h"
-#include "src/objects/template-objects-inl.h"
-
-#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-break-iterator-inl.h"
-#include "src/objects/js-collator-inl.h"
-#include "src/objects/js-date-time-format-inl.h"
-#include "src/objects/js-display-names-inl.h"
-#include "src/objects/js-list-format-inl.h"
-#include "src/objects/js-locale-inl.h"
-#include "src/objects/js-number-format-inl.h"
-#include "src/objects/js-plural-rules-inl.h"
-#include "src/objects/js-relative-time-format-inl.h"
-#include "src/objects/js-segment-iterator-inl.h"
-#include "src/objects/js-segmenter-inl.h"
-#include "src/objects/js-segments-inl.h"
-#endif
-
-#endif // V8_OBJECTS_CLASS_DEFINITIONS_TQ_DEPS_INL_H_
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 3c772f855a..1a928b20f1 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -70,13 +70,6 @@ int AbstractCode::SizeIncludingMetadata() {
return GetBytecodeArray().SizeIncludingMetadata();
}
}
-int AbstractCode::ExecutableSize() {
- if (IsCode()) {
- return GetCode().ExecutableSize();
- } else {
- return GetBytecodeArray().BytecodeArraySize();
- }
-}
Address AbstractCode::raw_instruction_start() {
if (IsCode()) {
@@ -171,24 +164,25 @@ OBJECT_CONSTRUCTORS_IMPL(Code, HeapObject)
NEVER_READ_ONLY_SPACE_IMPL(Code)
INT_ACCESSORS(Code, raw_instruction_size, kInstructionSizeOffset)
-INT_ACCESSORS(Code, safepoint_table_offset, kSafepointTableOffsetOffset)
+INT_ACCESSORS(Code, raw_metadata_size, kMetadataSizeOffset)
INT_ACCESSORS(Code, handler_table_offset, kHandlerTableOffsetOffset)
INT_ACCESSORS(Code, code_comments_offset, kCodeCommentsOffsetOffset)
+INT32_ACCESSORS(Code, unwinding_info_offset, kUnwindingInfoOffsetOffset)
#define CODE_ACCESSORS(name, type, offset) \
ACCESSORS_CHECKED2(Code, name, type, offset, true, \
!ObjectInYoungGeneration(value))
-#define SYNCHRONIZED_CODE_ACCESSORS(name, type, offset) \
- SYNCHRONIZED_ACCESSORS_CHECKED2(Code, name, type, offset, true, \
- !ObjectInYoungGeneration(value))
+#define RELEASE_ACQUIRE_CODE_ACCESSORS(name, type, offset) \
+ RELEASE_ACQUIRE_ACCESSORS_CHECKED2(Code, name, type, offset, true, \
+ !ObjectInYoungGeneration(value))
CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
CODE_ACCESSORS(source_position_table, Object, kSourcePositionTableOffset)
// Concurrent marker needs to access kind specific flags in code data container.
-SYNCHRONIZED_CODE_ACCESSORS(code_data_container, CodeDataContainer,
- kCodeDataContainerOffset)
+RELEASE_ACQUIRE_CODE_ACCESSORS(code_data_container, CodeDataContainer,
+ kCodeDataContainerOffset)
#undef CODE_ACCESSORS
-#undef SYNCHRONIZED_CODE_ACCESSORS
+#undef RELEASE_ACQUIRE_CODE_ACCESSORS
void Code::WipeOutHeader() {
WRITE_FIELD(*this, kRelocationInfoOffset, Smi::FromInt(0));
@@ -198,14 +192,16 @@ void Code::WipeOutHeader() {
}
void Code::clear_padding() {
+ // Clear the padding between the header and `raw_body_start`.
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
FIELD_SIZE(kOptionalPaddingOffset));
}
- Address data_end =
- has_unwinding_info() ? unwinding_info_end() : raw_instruction_end();
- memset(reinterpret_cast<void*>(data_end), 0,
- CodeSize() - (data_end - address()));
+
+ // Clear the padding after `raw_body_end`.
+ size_t trailing_padding_size =
+ CodeSize() - Code::kHeaderSize - raw_body_size();
+ memset(reinterpret_cast<void*>(raw_body_end()), 0, trailing_padding_size);
}
ByteArray Code::SourcePositionTable() const {
@@ -217,25 +213,35 @@ ByteArray Code::SourcePositionTable() const {
}
Object Code::next_code_link() const {
- return code_data_container().next_code_link();
+ return code_data_container(kAcquireLoad).next_code_link();
}
void Code::set_next_code_link(Object value) {
- code_data_container().set_next_code_link(value);
+ code_data_container(kAcquireLoad).set_next_code_link(value);
+}
+
+Address Code::raw_body_start() const { return raw_instruction_start(); }
+
+Address Code::raw_body_end() const {
+ return raw_body_start() + raw_body_size();
+}
+
+int Code::raw_body_size() const {
+ return raw_instruction_size() + raw_metadata_size();
}
int Code::InstructionSize() const {
- if (is_off_heap_trampoline()) return OffHeapInstructionSize();
- return raw_instruction_size();
+ return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapInstructionSize()
+ : raw_instruction_size();
}
Address Code::raw_instruction_start() const {
- return FIELD_ADDR(*this, kHeaderSize);
+ return field_address(kHeaderSize);
}
Address Code::InstructionStart() const {
- if (is_off_heap_trampoline()) return OffHeapInstructionStart();
- return raw_instruction_start();
+ return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapInstructionStart()
+ : raw_instruction_start();
}
Address Code::raw_instruction_end() const {
@@ -243,41 +249,32 @@ Address Code::raw_instruction_end() const {
}
Address Code::InstructionEnd() const {
- if (is_off_heap_trampoline()) return OffHeapInstructionEnd();
- return raw_instruction_end();
-}
-
-int Code::GetUnwindingInfoSizeOffset() const {
- DCHECK(has_unwinding_info());
- return RoundUp(kHeaderSize + raw_instruction_size(), kInt64Size);
+ return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapInstructionEnd()
+ : raw_instruction_end();
}
-int Code::unwinding_info_size() const {
- DCHECK(has_unwinding_info());
- return static_cast<int>(ReadField<uint64_t>(GetUnwindingInfoSizeOffset()));
+Address Code::raw_metadata_start() const {
+ return raw_instruction_start() + raw_instruction_size();
}
-void Code::set_unwinding_info_size(int value) {
- DCHECK(has_unwinding_info());
- WriteField<uint64_t>(GetUnwindingInfoSizeOffset(), value);
+Address Code::MetadataStart() const {
+ STATIC_ASSERT(kOnHeapBodyIsContiguous);
+ return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapMetadataStart()
+ : raw_metadata_start();
}
-Address Code::unwinding_info_start() const {
- DCHECK(has_unwinding_info());
- return FIELD_ADDR(*this, GetUnwindingInfoSizeOffset()) + kInt64Size;
+Address Code::raw_metadata_end() const {
+ return raw_metadata_start() + raw_metadata_size();
}
-Address Code::unwinding_info_end() const {
- DCHECK(has_unwinding_info());
- return unwinding_info_start() + unwinding_info_size();
+Address Code::MetadataEnd() const {
+ return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapMetadataEnd()
+ : raw_metadata_end();
}
-int Code::body_size() const {
- int unpadded_body_size =
- has_unwinding_info()
- ? static_cast<int>(unwinding_info_end() - raw_instruction_start())
- : raw_instruction_size();
- return RoundUp(unpadded_body_size, kObjectAlignment);
+int Code::MetadataSize() const {
+ return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapMetadataSize()
+ : raw_metadata_size();
}
int Code::SizeIncludingMetadata() const {
@@ -288,7 +285,7 @@ int Code::SizeIncludingMetadata() const {
}
ByteArray Code::unchecked_relocation_info() const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return ByteArray::unchecked_cast(
TaggedField<HeapObject, kRelocationInfoOffset>::load(isolate, *this));
}
@@ -317,13 +314,6 @@ bool Code::contains(Address inner_pointer) {
return (address() <= inner_pointer) && (inner_pointer < address() + Size());
}
-int Code::ExecutableSize() const {
- // Check that the assumptions about the layout of the code object holds.
- DCHECK_EQ(static_cast<int>(raw_instruction_start() - address()),
- Code::kHeaderSize);
- return raw_instruction_size() + Code::kHeaderSize;
-}
-
// static
void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
DCHECK_EQ(dest.length(), desc.reloc_size);
@@ -332,20 +322,18 @@ void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
static_cast<size_t>(desc.reloc_size));
}
-int Code::CodeSize() const { return SizeFor(body_size()); }
+int Code::CodeSize() const { return SizeFor(raw_body_size()); }
CodeKind Code::kind() const {
STATIC_ASSERT(FIELD_SIZE(kFlagsOffset) == kInt32Size);
return KindField::decode(ReadField<uint32_t>(kFlagsOffset));
}
-void Code::initialize_flags(CodeKind kind, bool has_unwinding_info,
- bool is_turbofanned, int stack_slots,
+void Code::initialize_flags(CodeKind kind, bool is_turbofanned, int stack_slots,
bool is_off_heap_trampoline) {
CHECK(0 <= stack_slots && stack_slots < StackSlotsField::kMax);
DCHECK(!CodeKindIsInterpretedJSFunction(kind));
- uint32_t flags = HasUnwindingInfoField::encode(has_unwinding_info) |
- KindField::encode(kind) |
+ uint32_t flags = KindField::encode(kind) |
IsTurbofannedField::encode(is_turbofanned) |
StackSlotsField::encode(stack_slots) |
IsOffHeapTrampoline::encode(is_off_heap_trampoline);
@@ -368,7 +356,7 @@ inline bool Code::checks_optimization_marker() const {
bool checks_marker =
(builtin_index() == Builtins::kCompileLazy ||
builtin_index() == Builtins::kInterpreterEntryTrampoline ||
- CodeKindChecksOptimizationMarker(kind()));
+ CodeKindCanTierUp(kind()));
return checks_marker ||
(CodeKindCanDeoptimize(kind()) && marked_for_deoptimization());
}
@@ -378,51 +366,50 @@ inline bool Code::has_tagged_params() const {
kind() != CodeKind::C_WASM_ENTRY && kind() != CodeKind::WASM_FUNCTION;
}
-inline bool Code::has_unwinding_info() const {
- return HasUnwindingInfoField::decode(ReadField<uint32_t>(kFlagsOffset));
-}
-
inline bool Code::is_turbofanned() const {
return IsTurbofannedField::decode(ReadField<uint32_t>(kFlagsOffset));
}
inline bool Code::can_have_weak_objects() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- int32_t flags = code_data_container().kind_specific_flags();
+ int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
return CanHaveWeakObjectsField::decode(flags);
}
inline void Code::set_can_have_weak_objects(bool value) {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- int32_t previous = code_data_container().kind_specific_flags();
+ CodeDataContainer container = code_data_container(kAcquireLoad);
+ int32_t previous = container.kind_specific_flags();
int32_t updated = CanHaveWeakObjectsField::update(previous, value);
- code_data_container().set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated);
}
inline bool Code::is_promise_rejection() const {
DCHECK(kind() == CodeKind::BUILTIN);
- int32_t flags = code_data_container().kind_specific_flags();
+ int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
return IsPromiseRejectionField::decode(flags);
}
inline void Code::set_is_promise_rejection(bool value) {
DCHECK(kind() == CodeKind::BUILTIN);
- int32_t previous = code_data_container().kind_specific_flags();
+ CodeDataContainer container = code_data_container(kAcquireLoad);
+ int32_t previous = container.kind_specific_flags();
int32_t updated = IsPromiseRejectionField::update(previous, value);
- code_data_container().set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated);
}
inline bool Code::is_exception_caught() const {
DCHECK(kind() == CodeKind::BUILTIN);
- int32_t flags = code_data_container().kind_specific_flags();
+ int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
return IsExceptionCaughtField::decode(flags);
}
inline void Code::set_is_exception_caught(bool value) {
DCHECK(kind() == CodeKind::BUILTIN);
- int32_t previous = code_data_container().kind_specific_flags();
+ CodeDataContainer container = code_data_container(kAcquireLoad);
+ int32_t previous = container.kind_specific_flags();
int32_t updated = IsExceptionCaughtField::update(previous, value);
- code_data_container().set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated);
}
inline bool Code::is_off_heap_trampoline() const {
@@ -437,16 +424,18 @@ inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
int Code::builtin_index() const {
int index = ReadField<int>(kBuiltinIndexOffset);
- DCHECK(index == -1 || Builtins::IsBuiltinId(index));
+ DCHECK(index == Builtins::kNoBuiltinId || Builtins::IsBuiltinId(index));
return index;
}
void Code::set_builtin_index(int index) {
- DCHECK(index == -1 || Builtins::IsBuiltinId(index));
+ DCHECK(index == Builtins::kNoBuiltinId || Builtins::IsBuiltinId(index));
WriteField<int>(kBuiltinIndexOffset, index);
}
-bool Code::is_builtin() const { return builtin_index() != -1; }
+bool Code::is_builtin() const {
+ return builtin_index() != Builtins::kNoBuiltinId;
+}
unsigned Code::inlined_bytecode_size() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()) ||
@@ -470,21 +459,22 @@ int Code::stack_slots() const {
bool Code::marked_for_deoptimization() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container().kind_specific_flags();
+ int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
return MarkedForDeoptimizationField::decode(flags);
}
void Code::set_marked_for_deoptimization(bool flag) {
DCHECK(CodeKindCanDeoptimize(kind()));
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- int32_t previous = code_data_container().kind_specific_flags();
+ CodeDataContainer container = code_data_container(kAcquireLoad);
+ int32_t previous = container.kind_specific_flags();
int32_t updated = MarkedForDeoptimizationField::update(previous, flag);
- code_data_container().set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated);
}
int Code::deoptimization_count() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container().kind_specific_flags();
+ int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
int count = DeoptCountField::decode(flags);
DCHECK_GE(count, 0);
return count;
@@ -492,40 +482,43 @@ int Code::deoptimization_count() const {
void Code::increment_deoptimization_count() {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container().kind_specific_flags();
+ CodeDataContainer container = code_data_container(kAcquireLoad);
+ int32_t flags = container.kind_specific_flags();
int32_t count = DeoptCountField::decode(flags);
DCHECK_GE(count, 0);
CHECK_LE(count + 1, DeoptCountField::kMax);
int32_t updated = DeoptCountField::update(flags, count + 1);
- code_data_container().set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated);
}
bool Code::embedded_objects_cleared() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- int32_t flags = code_data_container().kind_specific_flags();
+ int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
return EmbeddedObjectsClearedField::decode(flags);
}
void Code::set_embedded_objects_cleared(bool flag) {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
DCHECK_IMPLIES(flag, marked_for_deoptimization());
- int32_t previous = code_data_container().kind_specific_flags();
+ CodeDataContainer container = code_data_container(kAcquireLoad);
+ int32_t previous = container.kind_specific_flags();
int32_t updated = EmbeddedObjectsClearedField::update(previous, flag);
- code_data_container().set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated);
}
bool Code::deopt_already_counted() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container().kind_specific_flags();
+ int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
return DeoptAlreadyCountedField::decode(flags);
}
void Code::set_deopt_already_counted(bool flag) {
DCHECK(CodeKindCanDeoptimize(kind()));
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- int32_t previous = code_data_container().kind_specific_flags();
+ CodeDataContainer container = code_data_container(kAcquireLoad);
+ int32_t previous = container.kind_specific_flags();
int32_t updated = DeoptAlreadyCountedField::update(previous, flag);
- code_data_container().set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated);
}
bool Code::is_optimized_code() const {
@@ -534,25 +527,44 @@ bool Code::is_optimized_code() const {
bool Code::is_wasm_code() const { return kind() == CodeKind::WASM_FUNCTION; }
int Code::constant_pool_offset() const {
- if (!FLAG_enable_embedded_constant_pool) return code_comments_offset();
+ if (!FLAG_enable_embedded_constant_pool) {
+ // Redirection needed since the field doesn't exist in this case.
+ return code_comments_offset();
+ }
return ReadField<int>(kConstantPoolOffsetOffset);
}
void Code::set_constant_pool_offset(int value) {
- if (!FLAG_enable_embedded_constant_pool) return;
- DCHECK_LE(value, InstructionSize());
+ if (!FLAG_enable_embedded_constant_pool) {
+ // Redirection needed since the field doesn't exist in this case.
+ return;
+ }
+ DCHECK_LE(value, MetadataSize());
WriteField<int>(kConstantPoolOffsetOffset, value);
}
Address Code::constant_pool() const {
if (!has_constant_pool()) return kNullAddress;
- return InstructionStart() + constant_pool_offset();
+ return MetadataStart() + constant_pool_offset();
}
Address Code::code_comments() const {
- return InstructionStart() + code_comments_offset();
+ return MetadataStart() + code_comments_offset();
}
+Address Code::unwinding_info_start() const {
+ return MetadataStart() + unwinding_info_offset();
+}
+
+Address Code::unwinding_info_end() const { return MetadataEnd(); }
+
+int Code::unwinding_info_size() const {
+ DCHECK_GE(unwinding_info_end(), unwinding_info_start());
+ return static_cast<int>(unwinding_info_end() - unwinding_info_start());
+}
+
+bool Code::has_unwinding_info() const { return unwinding_info_size() > 0; }
+
Code Code::GetCodeFromTargetAddress(Address address) {
{
// TODO(jgruber,v8:6666): Support embedded builtins here. We'd need to pass
@@ -701,8 +713,8 @@ int32_t BytecodeArray::parameter_count() const {
ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
ACCESSORS(BytecodeArray, handler_table, ByteArray, kHandlerTableOffset)
-SYNCHRONIZED_ACCESSORS(BytecodeArray, synchronized_source_position_table,
- Object, kSourcePositionTableOffset)
+RELEASE_ACQUIRE_ACCESSORS(BytecodeArray, source_position_table, Object,
+ kSourcePositionTableOffset)
void BytecodeArray::clear_padding() {
int data_size = kHeaderSize + length();
@@ -715,22 +727,22 @@ Address BytecodeArray::GetFirstBytecodeAddress() {
}
bool BytecodeArray::HasSourcePositionTable() const {
- Object maybe_table = synchronized_source_position_table();
+ Object maybe_table = source_position_table(kAcquireLoad);
return !(maybe_table.IsUndefined() || DidSourcePositionGenerationFail());
}
bool BytecodeArray::DidSourcePositionGenerationFail() const {
- return synchronized_source_position_table().IsException();
+ return source_position_table(kAcquireLoad).IsException();
}
void BytecodeArray::SetSourcePositionsFailedToCollect() {
- set_synchronized_source_position_table(GetReadOnlyRoots().exception());
+ set_source_position_table(GetReadOnlyRoots().exception(), kReleaseStore);
}
ByteArray BytecodeArray::SourcePositionTable() const {
// WARNING: This function may be called from a background thread, hence
// changes to how it accesses the heap can easily lead to bugs.
- Object maybe_table = synchronized_source_position_table();
+ Object maybe_table = source_position_table(kAcquireLoad);
if (maybe_table.IsByteArray()) return ByteArray::cast(maybe_table);
ReadOnlyRoots roots = GetReadOnlyRoots();
DCHECK(maybe_table.IsUndefined(roots) || maybe_table.IsException(roots));
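
The new accessors above split the Code body into an instruction area followed by a metadata area, with the table offsets now interpreted relative to MetadataStart() rather than InstructionStart(). The standalone sketch below redoes that arithmetic with plain integers; all values are invented and it is not the V8 object layout itself.

// Standalone sketch of the new body/metadata arithmetic; all values invented.
#include <cassert>
#include <cstdint>

int main() {
  // Assumed example sizes and offsets, in bytes.
  const uint64_t instruction_start = 0x1000;
  const int instruction_size = 0x300;
  const int metadata_size = 0x80;
  const int code_comments_offset = 0x40;   // relative to metadata start
  const int unwinding_info_offset = 0x60;  // relative to metadata start

  const int body_size = instruction_size + metadata_size;  // raw_body_size()
  const uint64_t metadata_start = instruction_start + instruction_size;
  const uint64_t metadata_end = metadata_start + metadata_size;

  // code_comments() and unwinding_info_start() are metadata-relative now.
  const uint64_t code_comments = metadata_start + code_comments_offset;
  const uint64_t unwinding_info_start = metadata_start + unwinding_info_offset;

  // unwinding_info_size() is derived; it runs to the end of the metadata area.
  const int unwinding_info_size =
      static_cast<int>(metadata_end - unwinding_info_start);

  assert(body_size == 0x380);
  assert(code_comments < unwinding_info_start);
  assert(unwinding_info_size == 0x20);
  return 0;
}
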
diff --git a/deps/v8/src/objects/code-kind.h b/deps/v8/src/objects/code-kind.h
index a1f9b43900..6314005649 100644
--- a/deps/v8/src/objects/code-kind.h
+++ b/deps/v8/src/objects/code-kind.h
@@ -11,30 +11,35 @@
namespace v8 {
namespace internal {
-// TODO(jgruber,rmcilroy): Rename OPTIMIZED_FUNCTION once we've fully
-// disambiguated Turboprop, Turbofan, and NCI code kinds.
-// TODO(jgruber): Rename STUB to DEOPT_ENTRIES_OR_FOR_TESTING, or split it into
-// DEOPT_ENTRIES and FOR_TESTING, or convert DEOPT_ENTRIES into a builtin.
-#define CODE_KIND_LIST(V) \
- V(OPTIMIZED_FUNCTION) \
- V(BYTECODE_HANDLER) \
- V(STUB) \
- V(BUILTIN) \
- V(REGEXP) \
- V(WASM_FUNCTION) \
- V(WASM_TO_CAPI_FUNCTION) \
- V(WASM_TO_JS_FUNCTION) \
- V(JS_TO_WASM_FUNCTION) \
- V(JS_TO_JS_FUNCTION) \
- V(C_WASM_ENTRY) \
- V(INTERPRETED_FUNCTION) \
- V(NATIVE_CONTEXT_INDEPENDENT)
+// The order of INTERPRETED_FUNCTION to TURBOFAN is important. We use it to
+// check the relative ordering of the tiers when fetching / installing optimized
+// code.
+#define CODE_KIND_LIST(V) \
+ V(BYTECODE_HANDLER) \
+ V(FOR_TESTING) \
+ V(BUILTIN) \
+ V(REGEXP) \
+ V(WASM_FUNCTION) \
+ V(WASM_TO_CAPI_FUNCTION) \
+ V(WASM_TO_JS_FUNCTION) \
+ V(JS_TO_WASM_FUNCTION) \
+ V(JS_TO_JS_FUNCTION) \
+ V(C_WASM_ENTRY) \
+ V(INTERPRETED_FUNCTION) \
+ V(NATIVE_CONTEXT_INDEPENDENT) \
+ V(TURBOPROP) \
+ V(TURBOFAN)
enum class CodeKind {
#define DEFINE_CODE_KIND_ENUM(name) name,
CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
#undef DEFINE_CODE_KIND_ENUM
};
+STATIC_ASSERT(CodeKind::INTERPRETED_FUNCTION < CodeKind::TURBOPROP &&
+ CodeKind::INTERPRETED_FUNCTION <
+ CodeKind::NATIVE_CONTEXT_INDEPENDENT);
+STATIC_ASSERT(CodeKind::TURBOPROP < CodeKind::TURBOFAN &&
+ CodeKind::NATIVE_CONTEXT_INDEPENDENT < CodeKind::TURBOFAN);
#define V(...) +1
static constexpr int kCodeKindCount = CODE_KIND_LIST(V);
@@ -52,8 +57,9 @@ inline constexpr bool CodeKindIsNativeContextIndependentJSFunction(
}
inline constexpr bool CodeKindIsOptimizedJSFunction(CodeKind kind) {
- return kind == CodeKind::OPTIMIZED_FUNCTION ||
- kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
+ return kind == CodeKind::TURBOFAN ||
+ kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT ||
+ kind == CodeKind::TURBOPROP;
}
inline constexpr bool CodeKindIsJSFunction(CodeKind kind) {
@@ -72,9 +78,18 @@ inline constexpr bool CodeKindCanDeoptimize(CodeKind kind) {
return CodeKindIsOptimizedJSFunction(kind);
}
-inline constexpr bool CodeKindChecksOptimizationMarker(CodeKind kind) {
+inline constexpr bool CodeKindCanOSR(CodeKind kind) {
+ return kind == CodeKind::TURBOFAN || kind == CodeKind::TURBOPROP;
+}
+
+inline constexpr bool CodeKindIsOptimizedAndCanTierUp(CodeKind kind) {
+ return kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT ||
+ (FLAG_turboprop_as_midtier && kind == CodeKind::TURBOPROP);
+}
+
+inline constexpr bool CodeKindCanTierUp(CodeKind kind) {
return kind == CodeKind::INTERPRETED_FUNCTION ||
- kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
+ CodeKindIsOptimizedAndCanTierUp(kind);
}
// The optimization marker field on the feedback vector has a dual purpose of
@@ -82,10 +97,32 @@ inline constexpr bool CodeKindChecksOptimizationMarker(CodeKind kind) {
// access from multiple closures. The marker is not used for all code kinds
// though, in particular it is not used when generating NCI code.
inline constexpr bool CodeKindIsStoredInOptimizedCodeCache(CodeKind kind) {
- return kind == CodeKind::OPTIMIZED_FUNCTION;
+ return kind == CodeKind::TURBOFAN || kind == CodeKind::TURBOPROP;
}
-inline CodeKind CodeKindForTopTier() { return CodeKind::OPTIMIZED_FUNCTION; }
+inline OptimizationTier GetTierForCodeKind(CodeKind kind) {
+ if (kind == CodeKind::TURBOFAN) return OptimizationTier::kTopTier;
+ if (kind == CodeKind::TURBOPROP) {
+ return FLAG_turboprop_as_midtier ? OptimizationTier::kMidTier
+ : OptimizationTier::kTopTier;
+ }
+ if (kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT) {
+ return FLAG_turbo_nci_as_midtier ? OptimizationTier::kMidTier
+ : OptimizationTier::kTopTier;
+ }
+ return OptimizationTier::kNone;
+}
+
+inline CodeKind CodeKindForTopTier() {
+  // TODO(turboprop, mythria): We should make FLAG_turboprop mean that
+  // Turboprop is the mid-tier compiler, and replace FLAG_turboprop_as_midtier
+  // with FLAG_turboprop_as_top_tier (tiering up only to Turboprop) once
+  // FLAG_turboprop_as_midtier is stable and major regressions are addressed.
+ if (V8_UNLIKELY(FLAG_turboprop)) {
+ return FLAG_turboprop_as_midtier ? CodeKind::TURBOFAN : CodeKind::TURBOPROP;
+ }
+ return CodeKind::TURBOFAN;
+}
// The dedicated CodeKindFlag enum represents all code kinds in a format
// suitable for bit sets.
@@ -107,11 +144,11 @@ using CodeKinds = base::Flags<CodeKindFlag>;
DEFINE_OPERATORS_FOR_FLAGS(CodeKinds)
static constexpr CodeKinds kJSFunctionCodeKindsMask{
- CodeKindFlag::INTERPRETED_FUNCTION | CodeKindFlag::OPTIMIZED_FUNCTION |
- CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT};
+ CodeKindFlag::INTERPRETED_FUNCTION | CodeKindFlag::TURBOFAN |
+ CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT | CodeKindFlag::TURBOPROP};
static constexpr CodeKinds kOptimizedJSFunctionCodeKindsMask{
- CodeKindFlag::OPTIMIZED_FUNCTION |
- CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT};
+ CodeKindFlag::TURBOFAN | CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT |
+ CodeKindFlag::TURBOPROP};
} // namespace internal
} // namespace v8
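
GetTierForCodeKind above maps the renamed code kinds onto optimization tiers, with TURBOPROP and NATIVE_CONTEXT_INDEPENDENT switching between mid and top tier depending on flags. The condensed sketch below mirrors that mapping; the reduced enums and the flag stand-ins are assumptions for illustration, not V8's definitions.

// Condensed sketch of the tier mapping; enums and flags are illustrative only.
#include <cassert>

enum class Kind { INTERPRETED_FUNCTION, NATIVE_CONTEXT_INDEPENDENT, TURBOPROP, TURBOFAN };
enum class Tier { kNone, kMidTier, kTopTier };

// Stand-ins for FLAG_turboprop_as_midtier / FLAG_turbo_nci_as_midtier.
constexpr bool kTurbopropAsMidtier = true;
constexpr bool kNciAsMidtier = false;

constexpr Tier TierFor(Kind kind) {
  if (kind == Kind::TURBOFAN) return Tier::kTopTier;
  if (kind == Kind::TURBOPROP)
    return kTurbopropAsMidtier ? Tier::kMidTier : Tier::kTopTier;
  if (kind == Kind::NATIVE_CONTEXT_INDEPENDENT)
    return kNciAsMidtier ? Tier::kMidTier : Tier::kTopTier;
  return Tier::kNone;  // e.g. interpreted code has no optimization tier
}

int main() {
  static_assert(TierFor(Kind::TURBOFAN) == Tier::kTopTier, "");
  static_assert(TierFor(Kind::TURBOPROP) == Tier::kMidTier, "");
  assert(TierFor(Kind::INTERPRETED_FUNCTION) == Tier::kNone);
  return 0;
}
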
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index c796904718..1004180669 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -31,7 +31,7 @@ namespace v8 {
namespace internal {
Address Code::SafepointTableAddress() const {
- return InstructionStart() + safepoint_table_offset();
+ return MetadataStart() + safepoint_table_offset();
}
int Code::safepoint_table_size() const {
@@ -42,7 +42,7 @@ int Code::safepoint_table_size() const {
bool Code::has_safepoint_table() const { return safepoint_table_size() > 0; }
Address Code::HandlerTableAddress() const {
- return InstructionStart() + handler_table_offset();
+ return MetadataStart() + handler_table_offset();
}
int Code::handler_table_size() const {
@@ -62,14 +62,12 @@ int Code::constant_pool_size() const {
bool Code::has_constant_pool() const { return constant_pool_size() > 0; }
int Code::code_comments_size() const {
- DCHECK_GE(InstructionSize() - code_comments_offset(), 0);
- return InstructionSize() - code_comments_offset();
+ DCHECK_GE(unwinding_info_offset() - code_comments_offset(), 0);
+ return unwinding_info_offset() - code_comments_offset();
}
bool Code::has_code_comments() const { return code_comments_size() > 0; }
-int Code::ExecutableInstructionSize() const { return safepoint_table_offset(); }
-
void Code::ClearEmbeddedObjects(Heap* heap) {
HeapObject undefined = ReadOnlyRoots(heap).undefined_value();
int mode_mask = RelocInfo::EmbeddedObjectModeMask();
@@ -93,17 +91,12 @@ void Code::FlushICache() const {
void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
// Copy code.
+ STATIC_ASSERT(kOnHeapBodyIsContiguous);
CopyBytes(reinterpret_cast<byte*>(raw_instruction_start()), desc.buffer,
static_cast<size_t>(desc.instr_size));
-
- // Copy unwinding info, if any.
- if (desc.unwinding_info) {
- DCHECK_GT(desc.unwinding_info_size, 0);
- set_unwinding_info_size(desc.unwinding_info_size);
- CopyBytes(reinterpret_cast<byte*>(unwinding_info_start()),
- desc.unwinding_info,
- static_cast<size_t>(desc.unwinding_info_size));
- }
+ // TODO(jgruber,v8:11036): Merge with the above.
+ CopyBytes(reinterpret_cast<byte*>(raw_instruction_start() + desc.instr_size),
+ desc.unwinding_info, static_cast<size_t>(desc.unwinding_info_size));
// Copy reloc info.
CopyRelocInfoToByteArray(unchecked_relocation_info(), desc);
@@ -143,29 +136,60 @@ SafepointEntry Code::GetSafepointEntry(Address pc) {
int Code::OffHeapInstructionSize() const {
DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlobCode() == nullptr)
+ if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
return raw_instruction_size();
+ }
EmbeddedData d = EmbeddedData::FromBlob();
return d.InstructionSizeOfBuiltin(builtin_index());
}
Address Code::OffHeapInstructionStart() const {
DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlobCode() == nullptr)
- return raw_instruction_start();
+ if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
+ return raw_instruction_size();
+ }
EmbeddedData d = EmbeddedData::FromBlob();
return d.InstructionStartOfBuiltin(builtin_index());
}
Address Code::OffHeapInstructionEnd() const {
DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlobCode() == nullptr)
- return raw_instruction_end();
+ if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
+ return raw_instruction_size();
+ }
EmbeddedData d = EmbeddedData::FromBlob();
return d.InstructionStartOfBuiltin(builtin_index()) +
d.InstructionSizeOfBuiltin(builtin_index());
}
+int Code::OffHeapMetadataSize() const {
+ DCHECK(is_off_heap_trampoline());
+ if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
+ return raw_instruction_size();
+ }
+ EmbeddedData d = EmbeddedData::FromBlob();
+ return d.MetadataSizeOfBuiltin(builtin_index());
+}
+
+Address Code::OffHeapMetadataStart() const {
+ DCHECK(is_off_heap_trampoline());
+ if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
+ return raw_instruction_size();
+ }
+ EmbeddedData d = EmbeddedData::FromBlob();
+ return d.MetadataStartOfBuiltin(builtin_index());
+}
+
+Address Code::OffHeapMetadataEnd() const {
+ DCHECK(is_off_heap_trampoline());
+ if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
+ return raw_instruction_size();
+ }
+ EmbeddedData d = EmbeddedData::FromBlob();
+ return d.MetadataStartOfBuiltin(builtin_index()) +
+ d.MetadataSizeOfBuiltin(builtin_index());
+}
+
int AbstractCode::SourcePosition(int offset) {
Object maybe_table = source_position_table();
if (maybe_table.IsException()) return kNoSourcePosition;
@@ -679,8 +703,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
}
{
- // Stop before reaching any embedded tables
- int code_size = ExecutableInstructionSize();
+ int code_size = InstructionSize();
os << "Instructions (size = " << code_size << ")\n";
DisassembleCodeRange(isolate, os, *this, InstructionStart(), code_size,
current_pc);
@@ -689,8 +712,8 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
DCHECK_EQ(pool_size & kPointerAlignmentMask, 0);
os << "\nConstant Pool (size = " << pool_size << ")\n";
Vector<char> buf = Vector<char>::New(50);
- intptr_t* ptr = reinterpret_cast<intptr_t*>(InstructionStart() +
- constant_pool_offset());
+ intptr_t* ptr =
+ reinterpret_cast<intptr_t*>(MetadataStart() + constant_pool_offset());
for (int i = 0; i < pool_size; i += kSystemPointerSize, ptr++) {
SNPrintF(buf, "%4d %08" V8PRIxPTR, i, *ptr);
os << static_cast<const void*>(ptr) << " " << buf.begin() << "\n";
@@ -920,6 +943,11 @@ void DependentCode::InstallDependency(Isolate* isolate,
const MaybeObjectHandle& code,
Handle<HeapObject> object,
DependencyGroup group) {
+ if (V8_UNLIKELY(FLAG_trace_code_dependencies)) {
+ StdoutStream{} << "Installing dependency of [" << code->GetHeapObject()
+ << "] on [" << object << "] in group ["
+ << DependencyGroupName(group) << "]\n";
+ }
Handle<DependentCode> old_deps(DependentCode::GetDependentCode(object),
isolate);
Handle<DependentCode> new_deps =
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index d71a0b1132..201f17773a 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -78,6 +78,131 @@ class Code : public HeapObject {
// cache state, and arguments count.
using Flags = uint32_t;
+ // All Code objects have the following layout:
+ //
+ // +--------------------------+
+ // | header |
+ // | padded to code alignment |
+ // +--------------------------+ <-- raw_body_start()
+ // | instructions | == raw_instruction_start()
+ // | ... |
+ // | padded to meta alignment | see kMetadataAlignment
+ // +--------------------------+ <-- raw_instruction_end()
+ // | metadata | == raw_metadata_start() (MS)
+ // | ... |
+ // | | <-- MS + handler_table_offset()
+ // | | <-- MS + constant_pool_offset()
+ // | | <-- MS + code_comments_offset()
+ // | | <-- MS + unwinding_info_offset()
+ // | padded to obj alignment |
+ // +--------------------------+ <-- raw_metadata_end() == raw_body_end()
+ // | padded to code alignment |
+ // +--------------------------+
+ //
+ // In other words, the variable-size 'body' consists of 'instructions' and
+ // 'metadata'.
+ //
+ // Note the accessor functions below may be prefixed with 'raw'. In this case,
+ // raw accessors (e.g. raw_instruction_start) always refer to the on-heap
+ // Code object, while camel-case accessors (e.g. InstructionStart) may refer
+ // to an off-heap area in the case of embedded builtins.
+ //
+ // Embedded builtins are on-heap Code objects, with an out-of-line body
+ // section. The on-heap Code object contains an essentially empty body
+ // section, while accessors, as mentioned above, redirect to the off-heap
+ // area. Metadata table offsets remain relative to MetadataStart(), i.e. they
+ // point into the off-heap metadata section. The off-heap layout is described
+ // in detail in the EmbeddedData class, but at a high level one can assume a
+ // dedicated, out-of-line, instruction and metadata section for each embedded
+ // builtin *in addition* to the on-heap Code object:
+ //
+ // +--------------------------+ <-- InstructionStart()
+ // | off-heap instructions |
+ // | ... |
+ // +--------------------------+ <-- InstructionEnd()
+ //
+ // +--------------------------+ <-- MetadataStart() (MS)
+ // | off-heap metadata |
+ // | ... | <-- MS + handler_table_offset()
+ // | | <-- MS + constant_pool_offset()
+ // | | <-- MS + code_comments_offset()
+ // | | <-- MS + unwinding_info_offset()
+ // +--------------------------+ <-- MetadataEnd()
+
+  // Constants for use in static asserts, stating whether the body is
+  // contiguous, i.e. whether the instruction and metadata areas are adjacent.
+ static constexpr bool kOnHeapBodyIsContiguous = true;
+ static constexpr bool kOffHeapBodyIsContiguous = false;
+ static constexpr bool kBodyIsContiguous =
+ kOnHeapBodyIsContiguous && kOffHeapBodyIsContiguous;
+
+ inline Address raw_body_start() const;
+ inline Address raw_body_end() const;
+ inline int raw_body_size() const;
+
+ inline Address raw_instruction_start() const;
+ inline Address InstructionStart() const;
+ V8_EXPORT_PRIVATE Address OffHeapInstructionStart() const;
+
+ inline Address raw_instruction_end() const;
+ inline Address InstructionEnd() const;
+ V8_EXPORT_PRIVATE Address OffHeapInstructionEnd() const;
+
+ inline int raw_instruction_size() const;
+ inline void set_raw_instruction_size(int value);
+ inline int InstructionSize() const;
+ V8_EXPORT_PRIVATE int OffHeapInstructionSize() const;
+
+ inline Address raw_metadata_start() const;
+ inline Address MetadataStart() const;
+ V8_EXPORT_PRIVATE Address OffHeapMetadataStart() const;
+ inline Address raw_metadata_end() const;
+ inline Address MetadataEnd() const;
+ V8_EXPORT_PRIVATE Address OffHeapMetadataEnd() const;
+ inline int raw_metadata_size() const;
+ inline void set_raw_metadata_size(int value);
+ inline int MetadataSize() const;
+ int OffHeapMetadataSize() const;
+
+ // The metadata section is aligned to this value.
+ static constexpr int kMetadataAlignment = kIntSize;
+
+ // [safepoint_table_offset]: The offset where the safepoint table starts.
+ inline int safepoint_table_offset() const { return 0; }
+ Address SafepointTableAddress() const;
+ int safepoint_table_size() const;
+ bool has_safepoint_table() const;
+
+ // [handler_table_offset]: The offset where the exception handler table
+ // starts.
+ inline int handler_table_offset() const;
+ inline void set_handler_table_offset(int offset);
+ Address HandlerTableAddress() const;
+ int handler_table_size() const;
+ bool has_handler_table() const;
+
+ // [constant_pool offset]: Offset of the constant pool.
+ inline int constant_pool_offset() const;
+ inline void set_constant_pool_offset(int offset);
+ inline Address constant_pool() const;
+ int constant_pool_size() const;
+ bool has_constant_pool() const;
+
+ // [code_comments_offset]: Offset of the code comment section.
+ inline int code_comments_offset() const;
+ inline void set_code_comments_offset(int offset);
+ inline Address code_comments() const;
+ V8_EXPORT_PRIVATE int code_comments_size() const;
+ V8_EXPORT_PRIVATE bool has_code_comments() const;
+
+ // [unwinding_info_offset]: Offset of the unwinding info section.
+ inline int32_t unwinding_info_offset() const;
+ inline void set_unwinding_info_offset(int32_t offset);
+ inline Address unwinding_info_start() const;
+ inline Address unwinding_info_end() const;
+ inline int unwinding_info_size() const;
+ inline bool has_unwinding_info() const;
+
#ifdef ENABLE_DISASSEMBLER
const char* GetName(Isolate* isolate) const;
V8_EXPORT_PRIVATE void Disassemble(const char* name, std::ostream& os,
@@ -85,19 +210,6 @@ class Code : public HeapObject {
Address current_pc = kNullAddress);
#endif
- // [instruction_size]: Size of the native instructions, including embedded
- // data such as the safepoints table.
- inline int raw_instruction_size() const;
- inline void set_raw_instruction_size(int value);
-
- // Returns the size of the native instructions, including embedded
- // data such as the safepoints table. For off-heap code objects
- // this may differ from instruction_size in that this will return the size of
- // the off-heap instruction stream rather than the on-heap trampoline located
- // at instruction_start.
- inline int InstructionSize() const;
- V8_EXPORT_PRIVATE int OffHeapInstructionSize() const;
-
// [relocation_info]: Code relocation information
DECL_ACCESSORS(relocation_info, ByteArray)
@@ -115,7 +227,7 @@ class Code : public HeapObject {
inline ByteArray SourcePositionTable() const;
// [code_data_container]: A container indirection for all mutable fields.
- DECL_ACCESSORS(code_data_container, CodeDataContainer)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(code_data_container, CodeDataContainer)
// [next_code_link]: Link for lists of optimized or deoptimized code.
// Note that this field is stored in the {CodeDataContainer} to be mutable.
@@ -153,8 +265,8 @@ class Code : public HeapObject {
inline void set_can_have_weak_objects(bool value);
// [builtin_index]: For builtins, tells which builtin index the code object
- // has. The builtin index is a non-negative integer for builtins, and -1
- // otherwise.
+ // has. The builtin index is a non-negative integer for builtins, and
+ // Builtins::kNoBuiltinId (-1) otherwise.
inline int builtin_index() const;
inline void set_builtin_index(int id);
inline bool is_builtin() const;
@@ -168,39 +280,6 @@ class Code : public HeapObject {
// reserved in the code prologue.
inline int stack_slots() const;
- // [safepoint_table_offset]: If {has_safepoint_info()}, the offset in the
- // instruction stream where the safepoint table starts.
- inline int safepoint_table_offset() const;
- inline void set_safepoint_table_offset(int offset);
- Address SafepointTableAddress() const;
- int safepoint_table_size() const;
- bool has_safepoint_table() const;
-
- // [handler_table_offset]: The offset in the instruction stream where the
- // exception handler table starts.
- inline int handler_table_offset() const;
- inline void set_handler_table_offset(int offset);
- Address HandlerTableAddress() const;
- int handler_table_size() const;
- bool has_handler_table() const;
-
- // [constant_pool offset]: Offset of the constant pool.
- // Valid for FLAG_enable_embedded_constant_pool only
- inline int constant_pool_offset() const;
- inline void set_constant_pool_offset(int offset);
- int constant_pool_size() const;
- bool has_constant_pool() const;
-
- // [code_comments_offset]: Offset of the code comment section.
- inline int code_comments_offset() const;
- inline void set_code_comments_offset(int offset);
- inline Address code_comments() const;
- V8_EXPORT_PRIVATE int code_comments_size() const;
- V8_EXPORT_PRIVATE bool has_code_comments() const;
-
- // The size of the executable instruction area, without embedded metadata.
- int ExecutableInstructionSize() const;
-
// [marked_for_deoptimization]: If CodeKindCanDeoptimize(kind), tells whether
// the code is going to be deoptimized.
inline bool marked_for_deoptimization() const;
@@ -241,9 +320,6 @@ class Code : public HeapObject {
// this is a trampoline to an off-heap builtin.
inline bool is_off_heap_trampoline() const;
- // [constant_pool]: The constant pool for this function.
- inline Address constant_pool() const;
-
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
@@ -261,9 +337,8 @@ class Code : public HeapObject {
inline void clear_padding();
   // Initialize the flags field. Similar to clear_padding above, this ensures that
// the snapshot content is deterministic.
- inline void initialize_flags(CodeKind kind, bool has_unwinding_info,
- bool is_turbofanned, int stack_slots,
- bool is_off_heap_trampoline);
+ inline void initialize_flags(CodeKind kind, bool is_turbofanned,
+ int stack_slots, bool is_off_heap_trampoline);
// Convert a target address into a code object.
static inline Code GetCodeFromTargetAddress(Address address);
@@ -271,30 +346,8 @@ class Code : public HeapObject {
// Convert an entry address into an object.
static inline Code GetObjectFromEntryAddress(Address location_of_address);
- // Returns the address of the first instruction.
- inline Address raw_instruction_start() const;
-
- // Returns the address of the first instruction. For off-heap code objects
- // this differs from instruction_start (which would point to the off-heap
- // trampoline instead).
- inline Address InstructionStart() const;
- V8_EXPORT_PRIVATE Address OffHeapInstructionStart() const;
-
- // Returns the address right after the last instruction.
- inline Address raw_instruction_end() const;
-
- // Returns the address right after the last instruction. For off-heap code
- // objects this differs from instruction_end (which would point to the
- // off-heap trampoline instead).
- inline Address InstructionEnd() const;
- V8_EXPORT_PRIVATE Address OffHeapInstructionEnd() const;
-
- // Returns the size of the instructions, padding, relocation and unwinding
- // information.
- inline int body_size() const;
-
// Returns the size of code and its metadata. This includes the size of code
- // relocation information, deoptimization data and handler table.
+  // relocation information and deoptimization data.
inline int SizeIncludingMetadata() const;
// Returns the address of the first relocation info (read backwards!).
@@ -303,52 +356,6 @@ class Code : public HeapObject {
// Returns the address right after the relocation info (read backwards!).
inline byte* relocation_end() const;
- // [has_unwinding_info]: Whether this code object has unwinding information.
- // If it doesn't, unwinding_information_start() will point to invalid data.
- //
- // The body of all code objects has the following layout.
- //
- // +--------------------------+ <-- raw_instruction_start()
- // | instructions |
- // | ... |
- // +--------------------------+
- // | embedded metadata | <-- safepoint_table_offset()
- // | ... | <-- handler_table_offset()
- // | | <-- constant_pool_offset()
- // | | <-- code_comments_offset()
- // | |
- // +--------------------------+ <-- raw_instruction_end()
- //
- // If has_unwinding_info() is false, raw_instruction_end() points to the first
- // memory location after the end of the code object. Otherwise, the body
- // continues as follows:
- //
- // +--------------------------+
- // | padding to the next |
- // | 8-byte aligned address |
- // +--------------------------+ <-- raw_instruction_end()
- // | [unwinding_info_size] |
- // | as uint64_t |
- // +--------------------------+ <-- unwinding_info_start()
- // | unwinding info |
- // | ... |
- // +--------------------------+ <-- unwinding_info_end()
- //
- // and unwinding_info_end() points to the first memory location after the end
- // of the code object.
- //
- inline bool has_unwinding_info() const;
-
- // [unwinding_info_size]: Size of the unwinding information.
- inline int unwinding_info_size() const;
- inline void set_unwinding_info_size(int value);
-
- // Returns the address of the unwinding information, if any.
- inline Address unwinding_info_start() const;
-
- // Returns the address right after the end of the unwinding information.
- inline Address unwinding_info_end() const;
-
// Code entry point.
inline Address entry() const;
@@ -373,14 +380,9 @@ class Code : public HeapObject {
// Returns the object size for a given body (used for allocation).
static int SizeFor(int body_size) {
- DCHECK_SIZE_TAG_ALIGNED(body_size);
return RoundUp(kHeaderSize + body_size, kCodeAlignment);
}
- // Calculate the size of the code object to report for log events. This takes
- // the layout of the code object into account.
- inline int ExecutableSize() const;
-
DECL_CAST(Code)
// Dispatched behavior.
@@ -414,28 +416,30 @@ class Code : public HeapObject {
class OptimizedCodeIterator;
// Layout description.
-#define CODE_FIELDS(V) \
- V(kRelocationInfoOffset, kTaggedSize) \
- V(kDeoptimizationDataOffset, kTaggedSize) \
- V(kSourcePositionTableOffset, kTaggedSize) \
- V(kCodeDataContainerOffset, kTaggedSize) \
- /* Data or code not directly visited by GC directly starts here. */ \
- /* The serializer needs to copy bytes starting from here verbatim. */ \
- /* Objects embedded into code is visited via reloc info. */ \
- V(kDataStart, 0) \
- V(kInstructionSizeOffset, kIntSize) \
- V(kFlagsOffset, kInt32Size) \
- V(kSafepointTableOffsetOffset, kIntSize) \
- V(kHandlerTableOffsetOffset, kIntSize) \
- V(kConstantPoolOffsetOffset, \
- FLAG_enable_embedded_constant_pool ? kIntSize : 0) \
- V(kCodeCommentsOffsetOffset, kIntSize) \
- V(kBuiltinIndexOffset, kIntSize) \
- V(kInlinedBytecodeSizeOffset, kIntSize) \
- V(kUnalignedHeaderSize, 0) \
- /* Add padding to align the instruction start following right after */ \
- /* the Code object header. */ \
- V(kOptionalPaddingOffset, CODE_POINTER_PADDING(kOptionalPaddingOffset)) \
+#define CODE_FIELDS(V) \
+ V(kRelocationInfoOffset, kTaggedSize) \
+ V(kDeoptimizationDataOffset, kTaggedSize) \
+ V(kSourcePositionTableOffset, kTaggedSize) \
+ V(kCodeDataContainerOffset, kTaggedSize) \
+  /* Data or code not directly visited by GC starts here. */               \
+ /* The serializer needs to copy bytes starting from here verbatim. */ \
+  /* Objects embedded into code are visited via reloc info. */             \
+ V(kDataStart, 0) \
+ V(kInstructionSizeOffset, kIntSize) \
+ V(kMetadataSizeOffset, kIntSize) \
+ V(kFlagsOffset, kInt32Size) \
+ V(kBuiltinIndexOffset, kIntSize) \
+ V(kInlinedBytecodeSizeOffset, kIntSize) \
+ /* Offsets describing inline metadata tables, relative to MetadataStart. */ \
+ V(kHandlerTableOffsetOffset, kIntSize) \
+ V(kConstantPoolOffsetOffset, \
+ FLAG_enable_embedded_constant_pool ? kIntSize : 0) \
+ V(kCodeCommentsOffsetOffset, kIntSize) \
+ V(kUnwindingInfoOffsetOffset, kInt32Size) \
+ V(kUnalignedHeaderSize, 0) \
+ /* Add padding to align the instruction start following right after */ \
+ /* the Code object header. */ \
+ V(kOptionalPaddingOffset, CODE_POINTER_PADDING(kOptionalPaddingOffset)) \
V(kHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, CODE_FIELDS)
@@ -444,35 +448,32 @@ class Code : public HeapObject {
// This documents the amount of free space we have in each Code object header
// due to padding for code alignment.
#if V8_TARGET_ARCH_ARM64
- static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 16 : 28;
+ static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
#elif V8_TARGET_ARCH_MIPS64
- static constexpr int kHeaderPaddingSize = 28;
+ static constexpr int kHeaderPaddingSize = 24;
#elif V8_TARGET_ARCH_X64
- static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 16 : 28;
+ static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
#elif V8_TARGET_ARCH_ARM
- static constexpr int kHeaderPaddingSize = 16;
+ static constexpr int kHeaderPaddingSize = 12;
#elif V8_TARGET_ARCH_IA32
- static constexpr int kHeaderPaddingSize = 16;
+ static constexpr int kHeaderPaddingSize = 12;
#elif V8_TARGET_ARCH_MIPS
- static constexpr int kHeaderPaddingSize = 16;
+ static constexpr int kHeaderPaddingSize = 12;
#elif V8_TARGET_ARCH_PPC64
static constexpr int kHeaderPaddingSize =
- FLAG_enable_embedded_constant_pool ? (COMPRESS_POINTERS_BOOL ? 12 : 24)
- : (COMPRESS_POINTERS_BOOL ? 16 : 28);
+ FLAG_enable_embedded_constant_pool ? (COMPRESS_POINTERS_BOOL ? 8 : 20)
+ : (COMPRESS_POINTERS_BOOL ? 12 : 24);
#elif V8_TARGET_ARCH_S390X
- static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 16 : 28;
+ static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
#else
#error Unknown architecture.
#endif
STATIC_ASSERT(FIELD_SIZE(kOptionalPaddingOffset) == kHeaderPaddingSize);
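The padding changes above follow directly from the CODE_FIELDS change earlier in this hunk: kMetadataSizeOffset and kUnwindingInfoOffsetOffset add two 4-byte fields while kSafepointTableOffsetOffset is dropped, a net growth of 4 bytes that the alignment padding absorbs. A compile-time check of that arithmetic (plain C++, not V8 code; constant-pool builds add the same 4 bytes on both sides):

    // Untagged header bytes after kDataStart, excluding the optional padding
    // and assuming FLAG_enable_embedded_constant_pool is off.
    constexpr int kOldUntaggedBytes = 7 * 4;  // instruction_size, flags, safepoint,
                                              // handler, comments, builtin_index,
                                              // inlined_bytecode_size
    constexpr int kNewUntaggedBytes = 8 * 4;  // adds metadata_size and
                                              // unwinding_info_offset, drops
                                              // safepoint_table_offset
    static_assert(kNewUntaggedBytes - kOldUntaggedBytes == 4,
                  "the header data grows by one 4-byte field");
    // Hence every kHeaderPaddingSize above shrinks by 4, e.g. 28 -> 24 on
    // uncompressed x64/arm64 and 16 -> 12 with pointer compression.
    static_assert(28 - 4 == 24 && 16 - 4 == 12, "padding gives those bytes back");

    int main() { return 0; }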
- inline int GetUnwindingInfoSizeOffset() const;
-
class BodyDescriptor;
// Flags layout. base::BitField<type, shift, size>.
#define CODE_FLAGS_BIT_FIELDS(V, _) \
- V(HasUnwindingInfoField, bool, 1, _) \
V(KindField, CodeKind, 4, _) \
V(IsTurbofannedField, bool, 1, _) \
V(StackSlotsField, int, 24, _) \
@@ -480,7 +481,7 @@ class Code : public HeapObject {
DEFINE_BIT_FIELDS(CODE_FLAGS_BIT_FIELDS)
#undef CODE_FLAGS_BIT_FIELDS
STATIC_ASSERT(kCodeKindCount <= KindField::kNumValues);
- STATIC_ASSERT(CODE_FLAGS_BIT_FIELDS_Ranges::kBitsCount == 31);
+ STATIC_ASSERT(CODE_FLAGS_BIT_FIELDS_Ranges::kBitsCount == 30);
STATIC_ASSERT(CODE_FLAGS_BIT_FIELDS_Ranges::kBitsCount <=
FIELD_SIZE(kFlagsOffset) * kBitsPerByte);
@@ -520,6 +521,8 @@ class Code : public HeapObject {
class Code::OptimizedCodeIterator {
public:
explicit OptimizedCodeIterator(Isolate* isolate);
+ OptimizedCodeIterator(const OptimizedCodeIterator&) = delete;
+ OptimizedCodeIterator& operator=(const OptimizedCodeIterator&) = delete;
Code Next();
private:
@@ -528,7 +531,6 @@ class Code::OptimizedCodeIterator {
Isolate* isolate_;
DISALLOW_HEAP_ALLOCATION(no_gc)
- DISALLOW_COPY_AND_ASSIGN(OptimizedCodeIterator);
};
class AbstractCode : public HeapObject {
@@ -578,10 +580,6 @@ class AbstractCode : public HeapObject {
// Returns the kind of the code.
inline CodeKind kind();
- // Calculate the size of the code object to report for log events. This takes
- // the layout of the code object into account.
- inline int ExecutableSize();
-
DECL_CAST(AbstractCode)
inline Code GetCode();
inline BytecodeArray GetBytecodeArray();
@@ -644,7 +642,7 @@ class DependentCode : public WeakFixedArray {
kAllocationSiteTransitionChangedGroup
};
- // Register a code dependency of {cell} on {object}.
+ // Register a dependency of {code} on {object}, of the kind given by {group}.
V8_EXPORT_PRIVATE static void InstallDependency(Isolate* isolate,
const MaybeObjectHandle& code,
Handle<HeapObject> object,
@@ -774,7 +772,7 @@ class BytecodeArray : public FixedArrayBase {
// * ByteArray (when source positions have been collected for the bytecode)
// * exception (when an error occurred while explicitly collecting source
// positions for pre-existing bytecode).
- DECL_SYNCHRONIZED_ACCESSORS(source_position_table, Object)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(source_position_table, Object)
inline bool HasSourcePositionTable() const;
inline bool DidSourcePositionGenerationFail() const;
diff --git a/deps/v8/src/objects/compilation-cache-inl.h b/deps/v8/src/objects/compilation-cache-table-inl.h
index 324b40f7ea..473eed496c 100644
--- a/deps/v8/src/objects/compilation-cache-inl.h
+++ b/deps/v8/src/objects/compilation-cache-table-inl.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_COMPILATION_CACHE_INL_H_
-#define V8_OBJECTS_COMPILATION_CACHE_INL_H_
-
-#include "src/objects/compilation-cache.h"
+#ifndef V8_OBJECTS_COMPILATION_CACHE_TABLE_INL_H_
+#define V8_OBJECTS_COMPILATION_CACHE_TABLE_INL_H_
+#include "src/objects/compilation-cache-table.h"
#include "src/objects/name-inl.h"
#include "src/objects/script-inl.h"
#include "src/objects/shared-function-info.h"
@@ -93,4 +92,4 @@ InfoCellPair::InfoCellPair(Isolate* isolate, SharedFunctionInfo shared,
#include "src/objects/object-macros-undef.h"
-#endif // V8_OBJECTS_COMPILATION_CACHE_INL_H_
+#endif // V8_OBJECTS_COMPILATION_CACHE_TABLE_INL_H_
diff --git a/deps/v8/src/objects/compilation-cache-table.cc b/deps/v8/src/objects/compilation-cache-table.cc
new file mode 100644
index 0000000000..57cbeb040c
--- /dev/null
+++ b/deps/v8/src/objects/compilation-cache-table.cc
@@ -0,0 +1,447 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/compilation-cache-table.h"
+
+#include "src/objects/compilation-cache-table-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+const int kLiteralEntryLength = 2;
+const int kLiteralInitialLength = 2;
+const int kLiteralContextOffset = 0;
+const int kLiteralLiteralsOffset = 1;
+
+// The initial placeholder insertion of the eval cache survives this many GCs.
+const int kHashGenerations = 10;
+
+int SearchLiteralsMapEntry(CompilationCacheTable cache, int cache_entry,
+ Context native_context) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(native_context.IsNativeContext());
+ Object obj = cache.get(cache_entry);
+
+ // Check that there's no confusion between FixedArray and WeakFixedArray (the
+ // object used to be a FixedArray here).
+ DCHECK(!obj.IsFixedArray());
+ if (obj.IsWeakFixedArray()) {
+ WeakFixedArray literals_map = WeakFixedArray::cast(obj);
+ int length = literals_map.length();
+ for (int i = 0; i < length; i += kLiteralEntryLength) {
+ DCHECK(literals_map.Get(i + kLiteralContextOffset)->IsWeakOrCleared());
+ if (literals_map.Get(i + kLiteralContextOffset) ==
+ HeapObjectReference::Weak(native_context)) {
+ return i;
+ }
+ }
+ }
+ return -1;
+}
+
+void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
+ Handle<Context> native_context,
+ Handle<FeedbackCell> feedback_cell) {
+ Isolate* isolate = native_context->GetIsolate();
+ DCHECK(native_context->IsNativeContext());
+ STATIC_ASSERT(kLiteralEntryLength == 2);
+ Handle<WeakFixedArray> new_literals_map;
+ int entry;
+
+ Object obj = cache->get(cache_entry);
+
+ // Check that there's no confusion between FixedArray and WeakFixedArray (the
+ // object used to be a FixedArray here).
+ DCHECK(!obj.IsFixedArray());
+ if (!obj.IsWeakFixedArray() || WeakFixedArray::cast(obj).length() == 0) {
+ new_literals_map = isolate->factory()->NewWeakFixedArray(
+ kLiteralInitialLength, AllocationType::kOld);
+ entry = 0;
+ } else {
+ Handle<WeakFixedArray> old_literals_map(WeakFixedArray::cast(obj), isolate);
+ entry = SearchLiteralsMapEntry(*cache, cache_entry, *native_context);
+ if (entry >= 0) {
+ // Just set the code of the entry.
+      // Just set the feedback cell of the entry.
+ HeapObjectReference::Weak(*feedback_cell));
+ return;
+ }
+
+ // Can we reuse an entry?
+ DCHECK_LT(entry, 0);
+ int length = old_literals_map->length();
+ for (int i = 0; i < length; i += kLiteralEntryLength) {
+ if (old_literals_map->Get(i + kLiteralContextOffset)->IsCleared()) {
+ new_literals_map = old_literals_map;
+ entry = i;
+ break;
+ }
+ }
+
+ if (entry < 0) {
+      // Copy the old literals map and append one new entry.
+ new_literals_map = isolate->factory()->CopyWeakFixedArrayAndGrow(
+ old_literals_map, kLiteralEntryLength);
+ entry = old_literals_map->length();
+ }
+ }
+
+ new_literals_map->Set(entry + kLiteralContextOffset,
+ HeapObjectReference::Weak(*native_context));
+ new_literals_map->Set(entry + kLiteralLiteralsOffset,
+ HeapObjectReference::Weak(*feedback_cell));
+
+#ifdef DEBUG
+ for (int i = 0; i < new_literals_map->length(); i += kLiteralEntryLength) {
+ MaybeObject object = new_literals_map->Get(i + kLiteralContextOffset);
+ DCHECK(object->IsCleared() ||
+ object->GetHeapObjectAssumeWeak().IsNativeContext());
+ object = new_literals_map->Get(i + kLiteralLiteralsOffset);
+ DCHECK(object->IsCleared() ||
+ object->GetHeapObjectAssumeWeak().IsFeedbackCell());
+ }
+#endif
+
+ Object old_literals_map = cache->get(cache_entry);
+ if (old_literals_map != *new_literals_map) {
+ cache->set(cache_entry, *new_literals_map);
+ }
+}
+
+FeedbackCell SearchLiteralsMap(CompilationCacheTable cache, int cache_entry,
+ Context native_context) {
+ FeedbackCell result;
+ int entry = SearchLiteralsMapEntry(cache, cache_entry, native_context);
+ if (entry >= 0) {
+ WeakFixedArray literals_map = WeakFixedArray::cast(cache.get(cache_entry));
+ DCHECK_LE(entry + kLiteralEntryLength, literals_map.length());
+ MaybeObject object = literals_map.Get(entry + kLiteralLiteralsOffset);
+
+ if (!object->IsCleared()) {
+ result = FeedbackCell::cast(object->GetHeapObjectAssumeWeak());
+ }
+ }
+ DCHECK(result.is_null() || result.IsFeedbackCell());
+ return result;
+}
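SearchLiteralsMapEntry, AddToFeedbackCellsMap and SearchLiteralsMap together maintain, per eval-cache entry, a weak array of (native context, feedback cell) pairs. A simplified standalone model of that structure (plain C++, not V8 code; raw pointers stand in for the weak references and nullptr stands in for a cleared slot):

    #include <utility>
    #include <vector>

    struct ContextModel {};       // stands in for a NativeContext
    struct FeedbackCellModel {};  // stands in for a FeedbackCell

    // Each logical entry spans kLiteralEntryLength (= 2) slots:
    // slot 0 holds the native context, slot 1 the feedback cell for it.
    struct FeedbackCellsMapModel {
      std::vector<std::pair<const ContextModel*, FeedbackCellModel*>> entries;

      // Mirrors SearchLiteralsMap: find the cell registered for this context.
      FeedbackCellModel* Search(const ContextModel* native_context) const {
        for (const auto& entry : entries) {
          if (entry.first == native_context) return entry.second;
        }
        return nullptr;
      }

      // Mirrors AddToFeedbackCellsMap: update in place, reuse a cleared slot,
      // or grow the array by one entry.
      void Add(const ContextModel* native_context, FeedbackCellModel* cell) {
        for (auto& entry : entries) {
          if (entry.first == native_context || entry.first == nullptr) {
            entry = {native_context, cell};
            return;
          }
        }
        entries.emplace_back(native_context, cell);
      }
    };

    int main() {
      ContextModel ctx;
      FeedbackCellModel cell;
      FeedbackCellsMapModel map;
      map.Add(&ctx, &cell);
      return map.Search(&ctx) == &cell ? 0 : 1;
    }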
+
+// StringSharedKeys are used as keys in the eval cache.
+class StringSharedKey : public HashTableKey {
+ public:
+ // This tuple unambiguously identifies calls to eval() or
+ // CreateDynamicFunction() (such as through the Function() constructor).
+ // * source is the string passed into eval(). For dynamic functions, this is
+ // the effective source for the function, some of which is implicitly
+ // generated.
+ // * shared is the shared function info for the function containing the call
+  //   to eval(). For dynamic functions, shared is the native context closure.
+ // * When positive, position is the position in the source where eval is
+ // called. When negative, position is the negation of the position in the
+ // dynamic function's effective source where the ')' ends the parameters.
+ StringSharedKey(Handle<String> source, Handle<SharedFunctionInfo> shared,
+ LanguageMode language_mode, int position)
+ : HashTableKey(CompilationCacheShape::StringSharedHash(
+ *source, *shared, language_mode, position)),
+ source_(source),
+ shared_(shared),
+ language_mode_(language_mode),
+ position_(position) {}
+
+ bool IsMatch(Object other) override {
+ DisallowHeapAllocation no_allocation;
+ if (!other.IsFixedArray()) {
+ DCHECK(other.IsNumber());
+ uint32_t other_hash = static_cast<uint32_t>(other.Number());
+ return Hash() == other_hash;
+ }
+ FixedArray other_array = FixedArray::cast(other);
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(other_array.get(0));
+ if (shared != *shared_) return false;
+ int language_unchecked = Smi::ToInt(other_array.get(2));
+ DCHECK(is_valid_language_mode(language_unchecked));
+ LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
+ if (language_mode != language_mode_) return false;
+ int position = Smi::ToInt(other_array.get(3));
+ if (position != position_) return false;
+ String source = String::cast(other_array.get(1));
+ return source.Equals(*source_);
+ }
+
+ Handle<Object> AsHandle(Isolate* isolate) {
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(4);
+ array->set(0, *shared_);
+ array->set(1, *source_);
+ array->set(2, Smi::FromEnum(language_mode_));
+ array->set(3, Smi::FromInt(position_));
+ array->set_map(ReadOnlyRoots(isolate).fixed_cow_array_map());
+ return array;
+ }
+
+ private:
+ Handle<String> source_;
+ Handle<SharedFunctionInfo> shared_;
+ LanguageMode language_mode_;
+ int position_;
+};
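The class above keys the eval cache on the tuple (source, outer SharedFunctionInfo, language mode, position); an entry only matches when all four components do. A standalone sketch of that idea (plain C++, not V8 code; the hash mixing here is arbitrary, the real class delegates to CompilationCacheShape::StringSharedHash):

    #include <cstddef>
    #include <functional>
    #include <string>
    #include <tuple>

    struct EvalKeyModel {
      std::string source;        // the string passed to eval()
      const void* outer_shared;  // stands in for the calling SharedFunctionInfo
      bool is_strict;            // stands in for LanguageMode
      int position;              // call position (negated for dynamic functions)

      bool operator==(const EvalKeyModel& other) const {
        return std::tie(source, outer_shared, is_strict, position) ==
               std::tie(other.source, other.outer_shared, other.is_strict,
                        other.position);
      }

      std::size_t Hash() const {
        std::size_t h = std::hash<std::string>{}(source);
        h = h * 31 + std::hash<const void*>{}(outer_shared);
        h = h * 31 + static_cast<std::size_t>(2 * position + (is_strict ? 1 : 0));
        return h;
      }
    };

    int main() {
      EvalKeyModel a{"x + 1", nullptr, true, 10};
      EvalKeyModel b = a;
      return (a == b && a.Hash() == b.Hash()) ? 0 : 1;
    }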
+
+// RegExpKey carries the source and flags of a regular expression as key.
+class RegExpKey : public HashTableKey {
+ public:
+ RegExpKey(Handle<String> string, JSRegExp::Flags flags)
+ : HashTableKey(
+ CompilationCacheShape::RegExpHash(*string, Smi::FromInt(flags))),
+ string_(string),
+ flags_(Smi::FromInt(flags)) {}
+
+ // Rather than storing the key in the hash table, a pointer to the
+ // stored value is stored where the key should be. IsMatch then
+ // compares the search key to the found object, rather than comparing
+ // a key to a key.
+ bool IsMatch(Object obj) override {
+ FixedArray val = FixedArray::cast(obj);
+ return string_->Equals(String::cast(val.get(JSRegExp::kSourceIndex))) &&
+ (flags_ == val.get(JSRegExp::kFlagsIndex));
+ }
+
+ Handle<String> string_;
+ Smi flags_;
+};
+
+// CodeKey carries the SharedFunctionInfo key associated with a Code
+// object value.
+class CodeKey : public HashTableKey {
+ public:
+ explicit CodeKey(Handle<SharedFunctionInfo> key)
+ : HashTableKey(key->Hash()), key_(key) {}
+
+ bool IsMatch(Object string) override { return *key_ == string; }
+
+ Handle<SharedFunctionInfo> key_;
+};
+
+} // namespace
+
+MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
+ Handle<CompilationCacheTable> table, Handle<String> src,
+ Handle<Context> native_context, LanguageMode language_mode) {
+ // We use the empty function SFI as part of the key. Although the
+ // empty_function is native context dependent, the SFI is de-duped on
+ // snapshot builds by the StartupObjectCache, and so this does not prevent
+ // reuse of scripts in the compilation cache across native contexts.
+ Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
+ native_context->GetIsolate());
+ Isolate* isolate = native_context->GetIsolate();
+ src = String::Flatten(isolate, src);
+ StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
+ InternalIndex entry = table->FindEntry(isolate, &key);
+ if (entry.is_not_found()) return MaybeHandle<SharedFunctionInfo>();
+ int index = EntryToIndex(entry);
+ if (!table->get(index).IsFixedArray()) {
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+ Object obj = table->get(index + 1);
+ if (obj.IsSharedFunctionInfo()) {
+ return handle(SharedFunctionInfo::cast(obj), native_context->GetIsolate());
+ }
+ return MaybeHandle<SharedFunctionInfo>();
+}
+
+InfoCellPair CompilationCacheTable::LookupEval(
+ Handle<CompilationCacheTable> table, Handle<String> src,
+ Handle<SharedFunctionInfo> outer_info, Handle<Context> native_context,
+ LanguageMode language_mode, int position) {
+ InfoCellPair empty_result;
+ Isolate* isolate = native_context->GetIsolate();
+ src = String::Flatten(isolate, src);
+
+ StringSharedKey key(src, outer_info, language_mode, position);
+ InternalIndex entry = table->FindEntry(isolate, &key);
+ if (entry.is_not_found()) return empty_result;
+
+ int index = EntryToIndex(entry);
+ if (!table->get(index).IsFixedArray()) return empty_result;
+ Object obj = table->get(index + 1);
+ if (!obj.IsSharedFunctionInfo()) return empty_result;
+
+ STATIC_ASSERT(CompilationCacheShape::kEntrySize == 3);
+ FeedbackCell feedback_cell =
+ SearchLiteralsMap(*table, index + 2, *native_context);
+ return InfoCellPair(isolate, SharedFunctionInfo::cast(obj), feedback_cell);
+}
+
+Handle<Object> CompilationCacheTable::LookupRegExp(Handle<String> src,
+ JSRegExp::Flags flags) {
+ Isolate* isolate = GetIsolate();
+ DisallowHeapAllocation no_allocation;
+ RegExpKey key(src, flags);
+ InternalIndex entry = FindEntry(isolate, &key);
+ if (entry.is_not_found()) return isolate->factory()->undefined_value();
+ return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
+}
+
+MaybeHandle<Code> CompilationCacheTable::LookupCode(
+ Handle<SharedFunctionInfo> key) {
+ Isolate* isolate = GetIsolate();
+ DisallowHeapAllocation no_allocation;
+ CodeKey k(key);
+ InternalIndex entry = FindEntry(isolate, &k);
+ if (entry.is_not_found()) return {};
+ return Handle<Code>(Code::cast(get(EntryToIndex(entry) + 1)), isolate);
+}
+
+Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ Handle<Context> native_context, LanguageMode language_mode,
+ Handle<SharedFunctionInfo> value) {
+ Isolate* isolate = native_context->GetIsolate();
+ // We use the empty function SFI as part of the key. Although the
+ // empty_function is native context dependent, the SFI is de-duped on
+ // snapshot builds by the StartupObjectCache, and so this does not prevent
+ // reuse of scripts in the compilation cache across native contexts.
+ Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
+ isolate);
+ src = String::Flatten(isolate, src);
+ StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
+ Handle<Object> k = key.AsHandle(isolate);
+ cache = EnsureCapacity(isolate, cache);
+ InternalIndex entry = cache->FindInsertionEntry(isolate, key.Hash());
+ cache->set(EntryToIndex(entry), *k);
+ cache->set(EntryToIndex(entry) + 1, *value);
+ cache->ElementAdded();
+ return cache;
+}
+
+Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
+ Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
+ int position) {
+ Isolate* isolate = native_context->GetIsolate();
+ src = String::Flatten(isolate, src);
+ StringSharedKey key(src, outer_info, value->language_mode(), position);
+
+  // This block handles 'real' insertions, i.e. cases where the initial dummy
+  // insert (see below) already happened in an earlier call.
+ {
+ Handle<Object> k = key.AsHandle(isolate);
+ InternalIndex entry = cache->FindEntry(isolate, &key);
+ if (entry.is_found()) {
+ cache->set(EntryToIndex(entry), *k);
+ cache->set(EntryToIndex(entry) + 1, *value);
+ // AddToFeedbackCellsMap may allocate a new sub-array to live in the
+ // entry, but it won't change the cache array. Therefore EntryToIndex
+      // and entry remain correct.
+ STATIC_ASSERT(CompilationCacheShape::kEntrySize == 3);
+ AddToFeedbackCellsMap(cache, EntryToIndex(entry) + 2, native_context,
+ feedback_cell);
+ // Add hash again even on cache hit to avoid unnecessary cache delay in
+ // case of hash collisions.
+ }
+ }
+
+ // Create a dummy entry to mark that this key has already been inserted once.
+ cache = EnsureCapacity(isolate, cache);
+ InternalIndex entry = cache->FindInsertionEntry(isolate, key.Hash());
+ Handle<Object> k =
+ isolate->factory()->NewNumber(static_cast<double>(key.Hash()));
+ cache->set(EntryToIndex(entry), *k);
+ cache->set(EntryToIndex(entry) + 1, Smi::FromInt(kHashGenerations));
+ cache->ElementAdded();
+ return cache;
+}
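PutEval implements the two-probe policy described in the header change later in this diff: the first put for a key only records the key's hash with a generation counter, and only a later put while that placeholder is still alive stores the actual SharedFunctionInfo; the real code additionally re-inserts a fresh hash placeholder even on a hit, per the comment above. A standalone sketch of the policy (plain C++, not V8 code; a std::unordered_map stands in for the hash table and a string for the cached SharedFunctionInfo):

    #include <optional>
    #include <string>
    #include <unordered_map>

    // A cache slot is either a countdown placeholder (no payload yet) or a
    // real entry carrying the cached payload.
    struct EvalSlotModel {
      int generations_left = 0;
      std::optional<std::string> payload;
    };

    class EvalCacheModel {
     public:
      static constexpr int kHashGenerations = 10;  // same constant as above

      void Put(const std::string& key, const std::string& payload) {
        auto it = table_.find(key);
        if (it == table_.end()) {
          // First put: only mark that the key was seen, with a lifetime.
          table_[key].generations_left = kHashGenerations;
          return;
        }
        // A later put while the placeholder is live: store the real payload.
        // (Simplification: the real PutEval inserts a fresh placeholder entry
        // instead of refreshing this one.)
        it->second.payload = payload;
        it->second.generations_left = kHashGenerations;
      }

      std::optional<std::string> Lookup(const std::string& key) const {
        auto it = table_.find(key);
        if (it == table_.end()) return std::nullopt;
        return it->second.payload;  // empty while only the placeholder exists
      }

     private:
      std::unordered_map<std::string, EvalSlotModel> table_;
    };

    int main() {
      EvalCacheModel cache;
      cache.Put("1 + 1", "sfi");           // first put: placeholder only
      bool miss = !cache.Lookup("1 + 1");  // still a cache miss
      cache.Put("1 + 1", "sfi");           // second put: real entry
      bool hit = cache.Lookup("1 + 1").has_value();
      return (miss && hit) ? 0 : 1;
    }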
+
+Handle<CompilationCacheTable> CompilationCacheTable::PutRegExp(
+ Isolate* isolate, Handle<CompilationCacheTable> cache, Handle<String> src,
+ JSRegExp::Flags flags, Handle<FixedArray> value) {
+ RegExpKey key(src, flags);
+ cache = EnsureCapacity(isolate, cache);
+ InternalIndex entry = cache->FindInsertionEntry(isolate, key.Hash());
+ // We store the value in the key slot, and compare the search key
+ // to the stored value with a custom IsMatch function during lookups.
+ cache->set(EntryToIndex(entry), *value);
+ cache->set(EntryToIndex(entry) + 1, *value);
+ cache->ElementAdded();
+ return cache;
+}
+
+Handle<CompilationCacheTable> CompilationCacheTable::PutCode(
+ Isolate* isolate, Handle<CompilationCacheTable> cache,
+ Handle<SharedFunctionInfo> key, Handle<Code> value) {
+ CodeKey k(key);
+
+ {
+ InternalIndex entry = cache->FindEntry(isolate, &k);
+ if (entry.is_found()) {
+ // Update.
+ cache->set(EntryToIndex(entry), *key);
+ cache->set(EntryToIndex(entry) + 1, *value);
+ return cache;
+ }
+ }
+
+ // Insert.
+ cache = EnsureCapacity(isolate, cache);
+ InternalIndex entry = cache->FindInsertionEntry(isolate, k.Hash());
+ cache->set(EntryToIndex(entry), *key);
+ cache->set(EntryToIndex(entry) + 1, *value);
+ cache->ElementAdded();
+ return cache;
+}
+
+void CompilationCacheTable::Age() {
+ DisallowHeapAllocation no_allocation;
+ for (InternalIndex entry : IterateEntries()) {
+ const int entry_index = EntryToIndex(entry);
+ const int value_index = entry_index + 1;
+
+ Object key = get(entry_index);
+ if (key.IsNumber()) {
+ // The ageing mechanism for the initial dummy entry in the eval cache.
+ // The 'key' is the hash represented as a Number. The 'value' is a smi
+ // counting down from kHashGenerations. On reaching zero, the entry is
+ // cleared.
+ // Note: The following static assert only establishes an explicit
+ // connection between initialization- and use-sites of the smi value
+ // field.
+ STATIC_ASSERT(kHashGenerations);
+ const int new_count = Smi::ToInt(get(value_index)) - 1;
+ if (new_count == 0) {
+ RemoveEntry(entry_index);
+ } else {
+ DCHECK_GT(new_count, 0);
+ NoWriteBarrierSet(*this, value_index, Smi::FromInt(new_count));
+ }
+ } else if (key.IsFixedArray()) {
+ // The ageing mechanism for script and eval caches.
+ SharedFunctionInfo info = SharedFunctionInfo::cast(get(value_index));
+ if (info.IsInterpreted() && info.GetBytecodeArray().IsOld()) {
+ RemoveEntry(entry_index);
+ }
+ }
+ }
+}
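Age() is what eventually retires entries again: Number-keyed placeholders count down from kHashGenerations and are removed at zero, while FixedArray-keyed script and eval entries are removed once their bytecode has become old. A small standalone sketch of the countdown branch (plain C++, not V8 code):

    #include <string>
    #include <unordered_map>

    struct PlaceholderModel {
      int generations_left = 10;  // kHashGenerations in the file above
    };

    // One aging pass: decrement every placeholder and drop those reaching
    // zero, mirroring the Number-keyed branch of CompilationCacheTable::Age().
    void AgePlaceholders(std::unordered_map<std::string, PlaceholderModel>* table) {
      for (auto it = table->begin(); it != table->end();) {
        if (--it->second.generations_left == 0) {
          it = table->erase(it);
        } else {
          ++it;
        }
      }
    }

    int main() {
      std::unordered_map<std::string, PlaceholderModel> table;
      table["eval-source"];  // default-inserts a fresh placeholder
      for (int i = 0; i < 10; ++i) AgePlaceholders(&table);
      return table.empty() ? 0 : 1;  // the placeholder expired after 10 passes
    }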
+
+void CompilationCacheTable::Remove(Object value) {
+ DisallowHeapAllocation no_allocation;
+ for (InternalIndex entry : IterateEntries()) {
+ int entry_index = EntryToIndex(entry);
+ int value_index = entry_index + 1;
+ if (get(value_index) == value) {
+ RemoveEntry(entry_index);
+ }
+ }
+}
+
+void CompilationCacheTable::RemoveEntry(int entry_index) {
+ Object the_hole_value = GetReadOnlyRoots().the_hole_value();
+ for (int i = 0; i < kEntrySize; i++) {
+ NoWriteBarrierSet(*this, entry_index + i, the_hole_value);
+ }
+ ElementRemoved();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/compilation-cache.h b/deps/v8/src/objects/compilation-cache-table.h
index d2665513d2..b624767b8a 100644
--- a/deps/v8/src/objects/compilation-cache.h
+++ b/deps/v8/src/objects/compilation-cache-table.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_COMPILATION_CACHE_H_
-#define V8_OBJECTS_COMPILATION_CACHE_H_
+#ifndef V8_OBJECTS_COMPILATION_CACHE_TABLE_H_
+#define V8_OBJECTS_COMPILATION_CACHE_TABLE_H_
#include "src/objects/feedback-cell.h"
#include "src/objects/hash-table.h"
@@ -37,6 +37,10 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> {
static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
static const int kPrefixSize = 0;
+ // An 'entry' is essentially a grouped collection of slots. Entries are used
+ // in various ways by the different caches; most store the actual key in the
+ // first entry slot, but it may also be used differently.
+ // Why 3 slots? Because of the eval cache.
static const int kEntrySize = 3;
static const bool kMatchNeedsHoleCheck = true;
};
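The three slots per entry line up with the index, index + 1 and index + 2 accesses in LookupEval and PutEval earlier in this diff; the regexp and code caches only use the first two. A sketch of that slot assignment (an informal reading of the code above, not an enum that exists in V8):

    // Per-entry slot layout of CompilationCacheTable as the eval cache uses it.
    enum CompilationCacheEntrySlotModel {
      kKeySlot = 0,       // key: a FixedArray for script/eval, or the bare hash
                          // as a Number for placeholder entries
      kValueSlot = 1,     // value: SharedFunctionInfo, regexp data, Code, or the
                          // generation counter of a placeholder
      kFeedbackSlot = 2,  // eval only: WeakFixedArray mapping native context to
                          // FeedbackCell
    };

    int main() { return kFeedbackSlot == 2 ? 0 : 1; }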
@@ -74,59 +78,63 @@ class InfoCellPair {
EXTERN_DECLARE_HASH_TABLE(CompilationCacheTable, CompilationCacheShape)
-// This cache is used in multiple different variants.
-//
-// For regexp caching, it simply maps identifying info of the regexp
-// to the cached regexp object.
-//
-// Scripts and eval code only gets cached after a second probe for the
-// code object. To do so, on first "put" only a hash identifying the
-// source is entered into the cache, mapping it to a lifetime count of
-// the hash. On each call to Age all such lifetimes get reduced, and
-// removed once they reach zero. If a second put is called while such
-// a hash is live in the cache, the hash gets replaced by an actual
-// cache entry. Age also removes stale live entries from the cache.
-// Such entries are identified by SharedFunctionInfos pointing to
-// either the recompilation stub, or to "old" code. This avoids memory
-// leaks due to premature caching of scripts and eval strings that are
-// never needed later.
class CompilationCacheTable
: public HashTable<CompilationCacheTable, CompilationCacheShape> {
public:
NEVER_READ_ONLY_SPACE
+
+ // The 'script' cache contains SharedFunctionInfos.
static MaybeHandle<SharedFunctionInfo> LookupScript(
Handle<CompilationCacheTable> table, Handle<String> src,
Handle<Context> native_context, LanguageMode language_mode);
+ static Handle<CompilationCacheTable> PutScript(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ Handle<Context> native_context, LanguageMode language_mode,
+ Handle<SharedFunctionInfo> value);
+
+ // Eval code only gets cached after a second probe for the
+ // code object. To do so, on first "put" only a hash identifying the
+ // source is entered into the cache, mapping it to a lifetime count of
+ // the hash. On each call to Age all such lifetimes get reduced, and
+ // removed once they reach zero. If a second put is called while such
+ // a hash is live in the cache, the hash gets replaced by an actual
+ // cache entry. Age also removes stale live entries from the cache.
+ // Such entries are identified by SharedFunctionInfos pointing to
+ // either the recompilation stub, or to "old" code. This avoids memory
+ // leaks due to premature caching of eval strings that are
+ // never needed later.
static InfoCellPair LookupEval(Handle<CompilationCacheTable> table,
Handle<String> src,
Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
LanguageMode language_mode, int position);
- Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
- MaybeHandle<Code> LookupCode(Handle<SharedFunctionInfo> key);
-
- static Handle<CompilationCacheTable> PutScript(
- Handle<CompilationCacheTable> cache, Handle<String> src,
- Handle<Context> native_context, LanguageMode language_mode,
- Handle<SharedFunctionInfo> value);
static Handle<CompilationCacheTable> PutEval(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
int position);
+
+ // The RegExp cache contains JSRegExp::data fixed arrays.
+ Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
static Handle<CompilationCacheTable> PutRegExp(
Isolate* isolate, Handle<CompilationCacheTable> cache, Handle<String> src,
JSRegExp::Flags flags, Handle<FixedArray> value);
+
+ // The Code cache shares native-context-independent (NCI) code between
+ // contexts.
+ MaybeHandle<Code> LookupCode(Handle<SharedFunctionInfo> key);
static Handle<CompilationCacheTable> PutCode(
Isolate* isolate, Handle<CompilationCacheTable> cache,
Handle<SharedFunctionInfo> key, Handle<Code> value);
+
void Remove(Object value);
void Age();
- static const int kHashGenerations = 10;
DECL_CAST(CompilationCacheTable)
private:
+ void RemoveEntry(int entry_index);
+
OBJECT_CONSTRUCTORS(CompilationCacheTable,
HashTable<CompilationCacheTable, CompilationCacheShape>);
};
@@ -136,4 +144,4 @@ class CompilationCacheTable
#include "src/objects/object-macros-undef.h"
-#endif // V8_OBJECTS_COMPILATION_CACHE_H_
+#endif // V8_OBJECTS_COMPILATION_CACHE_TABLE_H_
diff --git a/deps/v8/src/objects/compressed-slots-inl.h b/deps/v8/src/objects/compressed-slots-inl.h
index 81eff427e4..ecb276ce36 100644
--- a/deps/v8/src/objects/compressed-slots-inl.h
+++ b/deps/v8/src/objects/compressed-slots-inl.h
@@ -33,7 +33,7 @@ Object CompressedObjectSlot::operator*() const {
return Object(DecompressTaggedAny(address(), value));
}
-Object CompressedObjectSlot::load(const Isolate* isolate) const {
+Object CompressedObjectSlot::load(IsolateRoot isolate) const {
Tagged_t value = *location();
return Object(DecompressTaggedAny(isolate, value));
}
@@ -52,7 +52,7 @@ Object CompressedObjectSlot::Relaxed_Load() const {
return Object(DecompressTaggedAny(address(), value));
}
-Object CompressedObjectSlot::Relaxed_Load(const Isolate* isolate) const {
+Object CompressedObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
return Object(DecompressTaggedAny(isolate, value));
}
@@ -85,7 +85,7 @@ MaybeObject CompressedMaybeObjectSlot::operator*() const {
return MaybeObject(DecompressTaggedAny(address(), value));
}
-MaybeObject CompressedMaybeObjectSlot::load(const Isolate* isolate) const {
+MaybeObject CompressedMaybeObjectSlot::load(IsolateRoot isolate) const {
Tagged_t value = *location();
return MaybeObject(DecompressTaggedAny(isolate, value));
}
@@ -99,8 +99,7 @@ MaybeObject CompressedMaybeObjectSlot::Relaxed_Load() const {
return MaybeObject(DecompressTaggedAny(address(), value));
}
-MaybeObject CompressedMaybeObjectSlot::Relaxed_Load(
- const Isolate* isolate) const {
+MaybeObject CompressedMaybeObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
return MaybeObject(DecompressTaggedAny(isolate, value));
}
@@ -126,8 +125,7 @@ HeapObjectReference CompressedHeapObjectSlot::operator*() const {
return HeapObjectReference(DecompressTaggedPointer(address(), value));
}
-HeapObjectReference CompressedHeapObjectSlot::load(
- const Isolate* isolate) const {
+HeapObjectReference CompressedHeapObjectSlot::load(IsolateRoot isolate) const {
Tagged_t value = *location();
return HeapObjectReference(DecompressTaggedPointer(isolate, value));
}
@@ -150,7 +148,7 @@ void CompressedHeapObjectSlot::StoreHeapObject(HeapObject value) const {
// OffHeapCompressedObjectSlot implementation.
//
-Object OffHeapCompressedObjectSlot::load(const Isolate* isolate) const {
+Object OffHeapCompressedObjectSlot::load(IsolateRoot isolate) const {
Tagged_t value = *location();
return Object(DecompressTaggedAny(isolate, value));
}
@@ -159,12 +157,12 @@ void OffHeapCompressedObjectSlot::store(Object value) const {
*location() = CompressTagged(value.ptr());
}
-Object OffHeapCompressedObjectSlot::Relaxed_Load(const Isolate* isolate) const {
+Object OffHeapCompressedObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
return Object(DecompressTaggedAny(isolate, value));
}
-Object OffHeapCompressedObjectSlot::Acquire_Load(const Isolate* isolate) const {
+Object OffHeapCompressedObjectSlot::Acquire_Load(IsolateRoot isolate) const {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location());
return Object(DecompressTaggedAny(isolate, value));
}
diff --git a/deps/v8/src/objects/compressed-slots.h b/deps/v8/src/objects/compressed-slots.h
index b8f3872384..36a6cab596 100644
--- a/deps/v8/src/objects/compressed-slots.h
+++ b/deps/v8/src/objects/compressed-slots.h
@@ -41,12 +41,12 @@ class CompressedObjectSlot : public SlotBase<CompressedObjectSlot, Tagged_t> {
// TODO(leszeks): Consider deprecating the operator* load, and always pass the
// Isolate.
inline Object operator*() const;
- inline Object load(const Isolate* isolate) const;
+ inline Object load(IsolateRoot isolate) const;
inline void store(Object value) const;
inline Object Acquire_Load() const;
inline Object Relaxed_Load() const;
- inline Object Relaxed_Load(const Isolate* isolate) const;
+ inline Object Relaxed_Load(IsolateRoot isolate) const;
inline void Relaxed_Store(Object value) const;
inline void Release_Store(Object value) const;
inline Object Release_CompareAndSwap(Object old, Object target) const;
@@ -77,11 +77,11 @@ class CompressedMaybeObjectSlot
: SlotBase(slot.address()) {}
inline MaybeObject operator*() const;
- inline MaybeObject load(const Isolate* isolate) const;
+ inline MaybeObject load(IsolateRoot isolate) const;
inline void store(MaybeObject value) const;
inline MaybeObject Relaxed_Load() const;
- inline MaybeObject Relaxed_Load(const Isolate* isolate) const;
+ inline MaybeObject Relaxed_Load(IsolateRoot isolate) const;
inline void Relaxed_Store(MaybeObject value) const;
inline void Release_CompareAndSwap(MaybeObject old, MaybeObject target) const;
};
@@ -105,7 +105,7 @@ class CompressedHeapObjectSlot
: SlotBase(slot.address()) {}
inline HeapObjectReference operator*() const;
- inline HeapObjectReference load(const Isolate* isolate) const;
+ inline HeapObjectReference load(IsolateRoot isolate) const;
inline void store(HeapObjectReference value) const;
inline HeapObject ToHeapObject() const;
@@ -131,11 +131,11 @@ class OffHeapCompressedObjectSlot
explicit OffHeapCompressedObjectSlot(const uint32_t* ptr)
: SlotBase(reinterpret_cast<Address>(ptr)) {}
- inline Object load(const Isolate* isolate) const;
+ inline Object load(IsolateRoot isolate) const;
inline void store(Object value) const;
- inline Object Relaxed_Load(const Isolate* isolate) const;
- inline Object Acquire_Load(const Isolate* isolate) const;
+ inline Object Relaxed_Load(IsolateRoot isolate) const;
+ inline Object Acquire_Load(IsolateRoot isolate) const;
inline void Relaxed_Store(Object value) const;
inline void Release_Store(Object value) const;
inline void Release_CompareAndSwap(Object old, Object target) const;
diff --git a/deps/v8/src/objects/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h
index 9bd30530c9..663ce6a965 100644
--- a/deps/v8/src/objects/contexts-inl.h
+++ b/deps/v8/src/objects/contexts-inl.h
@@ -56,11 +56,11 @@ SMI_ACCESSORS(Context, length, kLengthOffset)
CAST_ACCESSOR(NativeContext)
Object Context::get(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return get(isolate, index);
}
-Object Context::get(const Isolate* isolate, int index) const {
+Object Context::get(IsolateRoot isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index),
static_cast<unsigned>(this->length()));
return TaggedField<Object>::Relaxed_Load(isolate, *this,
@@ -88,11 +88,11 @@ void Context::set_scope_info(ScopeInfo scope_info) {
}
Object Context::synchronized_get(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return synchronized_get(isolate, index);
}
-Object Context::synchronized_get(const Isolate* isolate, int index) const {
+Object Context::synchronized_get(IsolateRoot isolate, int index) const {
DCHECK_LT(static_cast<unsigned int>(index),
static_cast<unsigned int>(this->length()));
return ACQUIRE_READ_FIELD(*this, OffsetOfElementAt(index));
@@ -268,17 +268,19 @@ Map Context::GetInitialJSArrayMap(ElementsKind kind) const {
}
DEF_GETTER(NativeContext, microtask_queue, MicrotaskQueue*) {
- ExternalPointer_t encoded_value =
- ReadField<ExternalPointer_t>(kMicrotaskQueueOffset);
- return reinterpret_cast<MicrotaskQueue*>(
- DecodeExternalPointer(isolate, encoded_value));
+ return reinterpret_cast<MicrotaskQueue*>(ReadExternalPointerField(
+ kMicrotaskQueueOffset, isolate, kNativeContextMicrotaskQueueTag));
+}
+
+void NativeContext::AllocateExternalPointerEntries(Isolate* isolate) {
+ InitExternalPointerField(kMicrotaskQueueOffset, isolate);
}
void NativeContext::set_microtask_queue(Isolate* isolate,
MicrotaskQueue* microtask_queue) {
- ExternalPointer_t encoded_value = EncodeExternalPointer(
- isolate, reinterpret_cast<Address>(microtask_queue));
- WriteField<ExternalPointer_t>(kMicrotaskQueueOffset, encoded_value);
+ WriteExternalPointerField(kMicrotaskQueueOffset, isolate,
+ reinterpret_cast<Address>(microtask_queue),
+ kNativeContextMicrotaskQueueTag);
}
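The microtask-queue accessor above now goes through the external-pointer helpers: the field is initialized per isolate, written with a tag, and read back with the same tag instead of holding the raw MicrotaskQueue pointer directly. The sketch below only illustrates that access pattern; the encoding is a made-up stand-in, not V8's actual scheme behind {Init,Read,Write}ExternalPointerField:

    #include <cstdint>

    // Purely illustrative encoding: XOR with a per-isolate key and a type tag,
    // so the stored field value is meaningless without both.
    struct IsolateModel {
      std::uintptr_t external_pointer_key;  // per-isolate secret in this model
    };

    constexpr std::uintptr_t kMicrotaskQueueTagModel = 0x5a5a;

    constexpr std::uintptr_t EncodeFieldModel(const IsolateModel& isolate,
                                              std::uintptr_t raw,
                                              std::uintptr_t tag) {
      return raw ^ isolate.external_pointer_key ^ tag;
    }

    constexpr std::uintptr_t DecodeFieldModel(const IsolateModel& isolate,
                                              std::uintptr_t encoded,
                                              std::uintptr_t tag) {
      return encoded ^ isolate.external_pointer_key ^ tag;
    }

    int main() {
      IsolateModel isolate{0x9e3779b9u};
      std::uintptr_t queue = 0xdeadbeef;  // stands in for a MicrotaskQueue*
      std::uintptr_t field =
          EncodeFieldModel(isolate, queue, kMicrotaskQueueTagModel);
      return DecodeFieldModel(isolate, field, kMicrotaskQueueTagModel) == queue
                 ? 0
                 : 1;
    }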
void NativeContext::synchronized_set_script_context_table(
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index e63ed580f4..f62e41c9a8 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -311,6 +311,7 @@ enum ContextLookupFlags {
V(FINALIZATION_REGISTRY_CLEANUP_SOME, JSFunction, \
finalization_registry_cleanup_some) \
V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
+ V(FUNCTION_TO_STRING_INDEX, JSFunction, function_to_string) \
V(OBJECT_TO_STRING, JSFunction, object_to_string) \
V(OBJECT_VALUE_OF_FUNCTION_INDEX, JSFunction, object_value_of_function) \
V(PROMISE_ALL_INDEX, JSFunction, promise_all) \
@@ -438,13 +439,13 @@ class Context : public HeapObject {
// Setter and getter for elements.
V8_INLINE Object get(int index) const;
- V8_INLINE Object get(const Isolate* isolate, int index) const;
+ V8_INLINE Object get(IsolateRoot isolate, int index) const;
V8_INLINE void set(int index, Object value);
// Setter with explicit barrier mode.
V8_INLINE void set(int index, Object value, WriteBarrierMode mode);
// Setter and getter with synchronization semantics.
V8_INLINE Object synchronized_get(int index) const;
- V8_INLINE Object synchronized_get(const Isolate* isolate, int index) const;
+ V8_INLINE Object synchronized_get(IsolateRoot isolate, int index) const;
V8_INLINE void synchronized_set(int index, Object value);
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
@@ -662,6 +663,8 @@ class NativeContext : public Context {
DECL_CAST(NativeContext)
// TODO(neis): Move some stuff from Context here.
+ inline void AllocateExternalPointerEntries(Isolate* isolate);
+
// [microtask_queue]: pointer to the MicrotaskQueue object.
DECL_GETTER(microtask_queue, MicrotaskQueue*)
inline void set_microtask_queue(Isolate* isolate, MicrotaskQueue* queue);
diff --git a/deps/v8/src/objects/data-handler-inl.h b/deps/v8/src/objects/data-handler-inl.h
index f9496cc342..f18f499294 100644
--- a/deps/v8/src/objects/data-handler-inl.h
+++ b/deps/v8/src/objects/data-handler-inl.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/data-handler-tq-inl.inc"
+
OBJECT_CONSTRUCTORS_IMPL(DataHandler, Struct)
CAST_ACCESSOR(DataHandler)
diff --git a/deps/v8/src/objects/data-handler.h b/deps/v8/src/objects/data-handler.h
index c9c0cf4cbc..e27b5be83f 100644
--- a/deps/v8/src/objects/data-handler.h
+++ b/deps/v8/src/objects/data-handler.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_DATA_HANDLER_H_
#include "src/objects/struct.h"
+#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -13,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/data-handler-tq.inc"
+
// DataHandler is a base class for load and store handlers that can't be
// encoded in one Smi. Kind of a handler can be deduced from instance type.
class DataHandler : public Struct {
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index 886c31583e..a0815d04df 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/debug-objects-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(BreakPoint)
TQ_OBJECT_CONSTRUCTORS_IMPL(BreakPointInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(CoverageInfo)
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index b9012fd9c1..e0ddaddd23 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -22,6 +22,8 @@ namespace internal {
class BreakPoint;
class BytecodeArray;
+#include "torque-generated/src/objects/debug-objects-tq.inc"
+
// The DebugInfo class holds additional information for a function being
// debugged.
class DebugInfo : public TorqueGeneratedDebugInfo<DebugInfo, Struct> {
diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h
index a7c6443a05..21f43d292a 100644
--- a/deps/v8/src/objects/descriptor-array-inl.h
+++ b/deps/v8/src/objects/descriptor-array-inl.h
@@ -24,6 +24,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/descriptor-array-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(DescriptorArray)
TQ_OBJECT_CONSTRUCTORS_IMPL(EnumCache)
@@ -104,11 +106,11 @@ ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) {
}
Name DescriptorArray::GetKey(InternalIndex descriptor_number) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return GetKey(isolate, descriptor_number);
}
-Name DescriptorArray::GetKey(const Isolate* isolate,
+Name DescriptorArray::GetKey(IsolateRoot isolate,
InternalIndex descriptor_number) const {
DCHECK_LT(descriptor_number.as_int(), number_of_descriptors());
int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int());
@@ -127,12 +129,11 @@ int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
}
Name DescriptorArray::GetSortedKey(int descriptor_number) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return GetSortedKey(isolate, descriptor_number);
}
-Name DescriptorArray::GetSortedKey(const Isolate* isolate,
- int descriptor_number) {
+Name DescriptorArray::GetSortedKey(IsolateRoot isolate, int descriptor_number) {
return GetKey(isolate, InternalIndex(GetSortedKeyIndex(descriptor_number)));
}
@@ -142,11 +143,11 @@ void DescriptorArray::SetSortedKey(int descriptor_number, int pointer) {
}
Object DescriptorArray::GetStrongValue(InternalIndex descriptor_number) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return GetStrongValue(isolate, descriptor_number);
}
-Object DescriptorArray::GetStrongValue(const Isolate* isolate,
+Object DescriptorArray::GetStrongValue(IsolateRoot isolate,
InternalIndex descriptor_number) {
return GetValue(isolate, descriptor_number).cast<Object>();
}
@@ -160,11 +161,11 @@ void DescriptorArray::SetValue(InternalIndex descriptor_number,
}
MaybeObject DescriptorArray::GetValue(InternalIndex descriptor_number) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return GetValue(isolate, descriptor_number);
}
-MaybeObject DescriptorArray::GetValue(const Isolate* isolate,
+MaybeObject DescriptorArray::GetValue(IsolateRoot isolate,
InternalIndex descriptor_number) {
DCHECK_LT(descriptor_number.as_int(), number_of_descriptors());
int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int());
@@ -191,11 +192,11 @@ int DescriptorArray::GetFieldIndex(InternalIndex descriptor_number) {
}
FieldType DescriptorArray::GetFieldType(InternalIndex descriptor_number) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return GetFieldType(isolate, descriptor_number);
}
-FieldType DescriptorArray::GetFieldType(const Isolate* isolate,
+FieldType DescriptorArray::GetFieldType(IsolateRoot isolate,
InternalIndex descriptor_number) {
DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
MaybeObject wrapped_type = GetValue(isolate, descriptor_number);
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 890863d5a0..57f9162c65 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -25,6 +25,8 @@ class Handle;
class Isolate;
+#include "torque-generated/src/objects/descriptor-array-tq.inc"
+
// An EnumCache is a pair used to hold keys and indices caches.
class EnumCache : public TorqueGeneratedEnumCache<EnumCache, Struct> {
public:
@@ -67,22 +69,22 @@ class DescriptorArray
// Accessors for fetching instance descriptor at descriptor number.
inline Name GetKey(InternalIndex descriptor_number) const;
- inline Name GetKey(const Isolate* isolate,
+ inline Name GetKey(IsolateRoot isolate,
InternalIndex descriptor_number) const;
inline Object GetStrongValue(InternalIndex descriptor_number);
- inline Object GetStrongValue(const Isolate* isolate,
+ inline Object GetStrongValue(IsolateRoot isolate,
InternalIndex descriptor_number);
inline MaybeObject GetValue(InternalIndex descriptor_number);
- inline MaybeObject GetValue(const Isolate* isolate,
+ inline MaybeObject GetValue(IsolateRoot isolate,
InternalIndex descriptor_number);
inline PropertyDetails GetDetails(InternalIndex descriptor_number);
inline int GetFieldIndex(InternalIndex descriptor_number);
inline FieldType GetFieldType(InternalIndex descriptor_number);
- inline FieldType GetFieldType(const Isolate* isolate,
+ inline FieldType GetFieldType(IsolateRoot isolate,
InternalIndex descriptor_number);
inline Name GetSortedKey(int descriptor_number);
- inline Name GetSortedKey(const Isolate* isolate, int descriptor_number);
+ inline Name GetSortedKey(IsolateRoot isolate, int descriptor_number);
inline int GetSortedKeyIndex(int descriptor_number);
// Accessor for complete descriptor.
@@ -168,9 +170,7 @@ class DescriptorArray
"Weak fields extend up to the end of the header.");
static_assert(kDescriptorsOffset == kHeaderSize,
"Variable-size array follows header.");
- // We use this visitor to also visitor to also visit the enum_cache, which is
- // the only tagged field in the header, and placed at the end of the header.
- using BodyDescriptor = FlexibleWeakBodyDescriptor<kStartOfStrongFieldsOffset>;
+ class BodyDescriptor;
// Layout of descriptor.
// Naming is consistent with Dictionary classes for easy templating.
diff --git a/deps/v8/src/objects/descriptor-array.tq b/deps/v8/src/objects/descriptor-array.tq
index 0b088b3d73..eb86a3343e 100644
--- a/deps/v8/src/objects/descriptor-array.tq
+++ b/deps/v8/src/objects/descriptor-array.tq
@@ -16,8 +16,9 @@ struct DescriptorEntry {
value: JSAny|Weak<Map>|AccessorInfo|AccessorPair|ClassPositions;
}
-@generateCppClass
-extern class DescriptorArray extends HeapObject {
+@export
+@customCppClass
+class DescriptorArray extends HeapObject {
const number_of_all_descriptors: uint16;
number_of_descriptors: uint16;
raw_number_of_marked_descriptors: uint16;
@@ -25,3 +26,6 @@ extern class DescriptorArray extends HeapObject {
enum_cache: EnumCache;
descriptors[number_of_all_descriptors]: DescriptorEntry;
}
+
+// A descriptor array where all values are held strongly.
+class StrongDescriptorArray extends DescriptorArray {}
diff --git a/deps/v8/src/objects/dictionary-inl.h b/deps/v8/src/objects/dictionary-inl.h
index 97d83eaa55..4df78ac99f 100644
--- a/deps/v8/src/objects/dictionary-inl.h
+++ b/deps/v8/src/objects/dictionary-inl.h
@@ -30,12 +30,12 @@ Dictionary<Derived, Shape>::Dictionary(Address ptr)
template <typename Derived, typename Shape>
Object Dictionary<Derived, Shape>::ValueAt(InternalIndex entry) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return ValueAt(isolate, entry);
}
template <typename Derived, typename Shape>
-Object Dictionary<Derived, Shape>::ValueAt(const Isolate* isolate,
+Object Dictionary<Derived, Shape>::ValueAt(IsolateRoot isolate,
InternalIndex entry) {
return this->get(isolate, DerivedHashTable::EntryToIndex(entry) +
Derived::kEntryValueIndex);
@@ -181,11 +181,11 @@ Handle<Map> GlobalDictionary::GetMap(ReadOnlyRoots roots) {
}
Name NameDictionary::NameAt(InternalIndex entry) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return NameAt(isolate, entry);
}
-Name NameDictionary::NameAt(const Isolate* isolate, InternalIndex entry) {
+Name NameDictionary::NameAt(IsolateRoot isolate, InternalIndex entry) {
return Name::cast(KeyAt(isolate, entry));
}
@@ -194,31 +194,31 @@ Handle<Map> NameDictionary::GetMap(ReadOnlyRoots roots) {
}
PropertyCell GlobalDictionary::CellAt(InternalIndex entry) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return CellAt(isolate, entry);
}
-PropertyCell GlobalDictionary::CellAt(const Isolate* isolate,
+PropertyCell GlobalDictionary::CellAt(IsolateRoot isolate,
InternalIndex entry) {
DCHECK(KeyAt(isolate, entry).IsPropertyCell(isolate));
return PropertyCell::cast(KeyAt(isolate, entry));
}
Name GlobalDictionary::NameAt(InternalIndex entry) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return NameAt(isolate, entry);
}
-Name GlobalDictionary::NameAt(const Isolate* isolate, InternalIndex entry) {
+Name GlobalDictionary::NameAt(IsolateRoot isolate, InternalIndex entry) {
return CellAt(isolate, entry).name(isolate);
}
Object GlobalDictionary::ValueAt(InternalIndex entry) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return ValueAt(isolate, entry);
}
-Object GlobalDictionary::ValueAt(const Isolate* isolate, InternalIndex entry) {
+Object GlobalDictionary::ValueAt(IsolateRoot isolate, InternalIndex entry) {
return CellAt(isolate, entry).value(isolate);
}
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index e8b61dbbb2..d9cc62afc1 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -32,7 +32,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Dictionary
using Key = typename Shape::Key;
// Returns the value at entry.
inline Object ValueAt(InternalIndex entry);
- inline Object ValueAt(const Isolate* isolate, InternalIndex entry);
+ inline Object ValueAt(IsolateRoot isolate, InternalIndex entry);
// Set the value for entry.
inline void ValueAtPut(InternalIndex entry, Object value);
@@ -131,6 +131,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) BaseNameDictionary
static const int kObjectHashIndex = kNextEnumerationIndexIndex + 1;
static const int kEntryValueIndex = 1;
+ static const bool kIsOrderedDictionaryType = false;
+
inline void SetHash(int hash);
inline int Hash() const;
@@ -141,11 +143,6 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) BaseNameDictionary
AllocationType allocation = AllocationType::kYoung,
MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
- // Collect the keys into the given KeyAccumulator, in ascending chronological
- // order of property creation.
- V8_WARN_UNUSED_RESULT static ExceptionStatus CollectKeysTo(
- Handle<Derived> dictionary, KeyAccumulator* keys);
-
// Allocate the next enumeration index. Possibly updates all enumeration
// indices in the table.
static int NextEnumerationIndex(Isolate* isolate, Handle<Derived> dictionary);
@@ -157,13 +154,6 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) BaseNameDictionary
static Handle<FixedArray> IterationIndices(Isolate* isolate,
Handle<Derived> dictionary);
- // Copies enumerable keys to preallocated fixed array.
- // Does not throw for uninitialized exports in module namespace objects, so
- // this has to be checked separately.
- static void CopyEnumKeysTo(Isolate* isolate, Handle<Derived> dictionary,
- Handle<FixedArray> storage, KeyCollectionMode mode,
- KeyAccumulator* accumulator);
-
template <typename LocalIsolate>
V8_WARN_UNUSED_RESULT static Handle<Derived> AddNoUpdateNextEnumerationIndex(
LocalIsolate* isolate, Handle<Derived> dictionary, Key key,
@@ -197,7 +187,7 @@ class V8_EXPORT_PRIVATE NameDictionary
static const int kInitialCapacity = 2;
inline Name NameAt(InternalIndex entry);
- inline Name NameAt(const Isolate* isolate, InternalIndex entry);
+ inline Name NameAt(IsolateRoot isolate, InternalIndex entry);
inline void set_hash(int hash);
inline int hash() const;
@@ -234,14 +224,14 @@ class V8_EXPORT_PRIVATE GlobalDictionary
DECL_CAST(GlobalDictionary)
inline Object ValueAt(InternalIndex entry);
- inline Object ValueAt(const Isolate* isolate, InternalIndex entry);
+ inline Object ValueAt(IsolateRoot isolate, InternalIndex entry);
inline PropertyCell CellAt(InternalIndex entry);
- inline PropertyCell CellAt(const Isolate* isolate, InternalIndex entry);
+ inline PropertyCell CellAt(IsolateRoot isolate, InternalIndex entry);
inline void SetEntry(InternalIndex entry, Object key, Object value,
PropertyDetails details);
inline void ClearEntry(InternalIndex entry);
inline Name NameAt(InternalIndex entry);
- inline Name NameAt(const Isolate* isolate, InternalIndex entry);
+ inline Name NameAt(IsolateRoot isolate, InternalIndex entry);
inline void ValueAtPut(InternalIndex entry, Object value);
OBJECT_CONSTRUCTORS(
@@ -361,6 +351,22 @@ class NumberDictionary
Dictionary<NumberDictionary, NumberDictionaryShape>);
};
+// The comparator is passed two indices |a| and |b|, and it returns true when the
+// property at index |a| comes before the property at index |b| in the
+// enumeration order.
+template <typename Dictionary>
+struct EnumIndexComparator {
+ explicit EnumIndexComparator(Dictionary dict) : dict(dict) {}
+ bool operator()(Tagged_t a, Tagged_t b) {
+ PropertyDetails da(
+ dict.DetailsAt(InternalIndex(Smi(static_cast<Address>(a)).value())));
+ PropertyDetails db(
+ dict.DetailsAt(InternalIndex(Smi(static_cast<Address>(b)).value())));
+ return da.dictionary_index() < db.dictionary_index();
+ }
+ Dictionary dict;
+};
+
} // namespace internal
} // namespace v8
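The EnumIndexComparator added above sorts dictionary entries into property-creation order by comparing each entry's dictionary_index(), decoded from Smi-encoded entry numbers. A minimal stand-alone sketch of that comparator shape, using plain ints instead of Tagged_t/Smi values; DictionarySketch and the other names below are placeholders, not V8 API:

#include <algorithm>
#include <vector>

// Stand-in for a dictionary: maps an entry index to its enumeration
// (creation) order, mirroring PropertyDetails::dictionary_index().
struct DictionarySketch {
  std::vector<int> enumeration_index;
};

// Same shape as EnumIndexComparator: returns true when entry |a| was created
// before entry |b|, so it forms a strict weak ordering usable with std::sort.
struct EnumIndexComparatorSketch {
  explicit EnumIndexComparatorSketch(const DictionarySketch& d) : dict(d) {}
  bool operator()(int a, int b) const {
    return dict.enumeration_index[a] < dict.enumeration_index[b];
  }
  const DictionarySketch& dict;
};

// Usage: given entry indices, sort them into creation order.
//   std::vector<int> entries = {2, 0, 1};
//   std::sort(entries.begin(), entries.end(), EnumIndexComparatorSketch(dict));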
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index 32bd891a74..4dcbb2befc 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -22,8 +22,6 @@
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/slots.h"
#include "src/utils/utils.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions.h"
// Each concrete ElementsAccessor can handle exactly one ElementsKind,
// several abstract ElementsAccessor classes are used to allow sharing
@@ -179,7 +177,7 @@ void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base,
if (raw_copy_size < 0) {
DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size =
- Min(from_base.length() - from_start, to_base.length() - to_start);
+ std::min(from_base.length() - from_start, to_base.length() - to_start);
int start = to_start + copy_size;
int length = to_base.length() - start;
if (length > 0) {
@@ -252,7 +250,7 @@ void CopyDoubleToObjectElements(Isolate* isolate, FixedArrayBase from_base,
DisallowHeapAllocation no_allocation;
DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size =
- Min(from_base.length() - from_start, to_base.length() - to_start);
+ std::min(from_base.length() - from_start, to_base.length() - to_start);
// Also initialize the area that will be copied over since HeapNumber
// allocation below can cause an incremental marking step, requiring all
// existing heap objects to be properly initialized.
@@ -296,7 +294,7 @@ void CopyDoubleToDoubleElements(FixedArrayBase from_base, uint32_t from_start,
if (raw_copy_size < 0) {
DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size =
- Min(from_base.length() - from_start, to_base.length() - to_start);
+ std::min(from_base.length() - from_start, to_base.length() - to_start);
for (int i = to_start + copy_size; i < to_base.length(); ++i) {
FixedDoubleArray::cast(to_base).set_the_hole(i);
}
@@ -542,6 +540,8 @@ template <typename Subclass, typename ElementsTraitsParam>
class ElementsAccessorBase : public InternalElementsAccessor {
public:
ElementsAccessorBase() = default;
+ ElementsAccessorBase(const ElementsAccessorBase&) = delete;
+ ElementsAccessorBase& operator=(const ElementsAccessorBase&) = delete;
using ElementsTraits = ElementsTraitsParam;
using BackingStore = typename ElementsTraitsParam::BackingStore;
@@ -704,7 +704,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
// Check whether the backing store should be shrunk.
uint32_t capacity = backing_store->length();
- old_length = Min(old_length, capacity);
+ old_length = std::min(old_length, capacity);
if (length == 0) {
array->initialize_elements();
} else if (length <= capacity) {
@@ -733,7 +733,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
}
} else {
// Check whether the backing store should be expanded.
- capacity = Max(length, JSObject::NewElementsCapacity(capacity));
+ capacity = std::max(length, JSObject::NewElementsCapacity(capacity));
Subclass::GrowCapacityAndConvertImpl(array, capacity);
}
@@ -1325,9 +1325,6 @@ class ElementsAccessorBase : public InternalElementsAccessor {
uint32_t length) {
UNREACHABLE();
}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ElementsAccessorBase);
};
class DictionaryElementsAccessor
@@ -1423,7 +1420,7 @@ class DictionaryElementsAccessor
DisallowHeapAllocation no_gc;
NumberDictionary dict = NumberDictionary::cast(backing_store);
if (!dict.requires_slow_elements()) return false;
- const Isolate* isolate = GetIsolateForPtrCompr(holder);
+ IsolateRoot isolate = GetIsolateForPtrCompr(holder);
ReadOnlyRoots roots = holder.GetReadOnlyRoots(isolate);
for (InternalIndex i : dict.IterateEntries()) {
Object key = dict.KeyAt(isolate, i);
@@ -1812,7 +1809,7 @@ class DictionaryElementsAccessor
if (k.Number() > NumberDictionary::kRequiresSlowElementsLimit) {
requires_slow_elements = true;
} else {
- max_key = Max(max_key, Smi::ToInt(k));
+ max_key = std::max(max_key, Smi::ToInt(k));
}
}
if (requires_slow_elements) {
diff --git a/deps/v8/src/objects/elements.h b/deps/v8/src/objects/elements.h
index 551183fe6d..4a34e866f2 100644
--- a/deps/v8/src/objects/elements.h
+++ b/deps/v8/src/objects/elements.h
@@ -22,6 +22,8 @@ class ElementsAccessor {
public:
ElementsAccessor() = default;
virtual ~ElementsAccessor() = default;
+ ElementsAccessor(const ElementsAccessor&) = delete;
+ ElementsAccessor& operator=(const ElementsAccessor&) = delete;
// Returns a shared ElementsAccessor for the specified ElementsKind.
static ElementsAccessor* ForKind(ElementsKind elements_kind) {
@@ -202,8 +204,6 @@ class ElementsAccessor {
private:
V8_EXPORT_PRIVATE static ElementsAccessor** elements_accessors_;
-
- DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
V8_WARN_UNUSED_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
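Both ElementsAccessor and ElementsAccessorBase above swap the DISALLOW_COPY_AND_ASSIGN macro for explicitly deleted copy operations declared in the public section. A minimal sketch of that idiom in isolation (the class name is a placeholder):

class NonCopyableSketch {
 public:
  NonCopyableSketch() = default;
  // Deleting the copy constructor and copy assignment in the public section
  // replaces the old private DISALLOW_COPY_AND_ASSIGN(NonCopyableSketch).
  NonCopyableSketch(const NonCopyableSketch&) = delete;
  NonCopyableSketch& operator=(const NonCopyableSketch&) = delete;
};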
diff --git a/deps/v8/src/objects/embedder-data-array-inl.h b/deps/v8/src/objects/embedder-data-array-inl.h
index 9c514aef89..6eb1076287 100644
--- a/deps/v8/src/objects/embedder-data-array-inl.h
+++ b/deps/v8/src/objects/embedder-data-array-inl.h
@@ -16,15 +16,16 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/embedder-data-array-tq-inl.inc"
TQ_OBJECT_CONSTRUCTORS_IMPL(EmbedderDataArray)
Address EmbedderDataArray::slots_start() {
- return FIELD_ADDR(*this, OffsetOfElementAt(0));
+ return field_address(OffsetOfElementAt(0));
}
Address EmbedderDataArray::slots_end() {
- return FIELD_ADDR(*this, OffsetOfElementAt(length()));
+ return field_address(OffsetOfElementAt(length()));
}
} // namespace internal
diff --git a/deps/v8/src/objects/embedder-data-array.h b/deps/v8/src/objects/embedder-data-array.h
index 728c3cf86a..5c4389c16d 100644
--- a/deps/v8/src/objects/embedder-data-array.h
+++ b/deps/v8/src/objects/embedder-data-array.h
@@ -8,7 +8,6 @@
#include "src/common/globals.h"
#include "src/handles/maybe-handles.h"
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/embedder-data-array-tq.inc"
+
// This is a storage array for embedder data fields stored in native context.
// It's basically an "array of EmbedderDataSlots".
// Note, if the pointer compression is enabled the embedder data slot also
diff --git a/deps/v8/src/objects/embedder-data-slot-inl.h b/deps/v8/src/objects/embedder-data-slot-inl.h
index 2ede262f80..f9ef6e1e56 100644
--- a/deps/v8/src/objects/embedder-data-slot-inl.h
+++ b/deps/v8/src/objects/embedder-data-slot-inl.h
@@ -27,6 +27,19 @@ EmbedderDataSlot::EmbedderDataSlot(JSObject object, int embedder_field_index)
: SlotBase(FIELD_ADDR(
object, object.GetEmbedderFieldOffset(embedder_field_index))) {}
+void EmbedderDataSlot::AllocateExternalPointerEntry(Isolate* isolate) {
+#ifdef V8_HEAP_SANDBOX
+ // TODO(v8:10391, saelo): Use InitExternalPointerField() once
+ // ExternalPointer_t is 4-bytes.
+ uint32_t index = isolate->external_pointer_table().allocate();
+ // Object slots don't support storing raw values, so we just "reinterpret
+ // cast" the index value to Object.
+ Object index_as_object(index);
+ ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(index_as_object);
+ ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi::zero());
+#endif
+}
+
Object EmbedderDataSlot::load_tagged() const {
return ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Load();
}
@@ -61,40 +74,75 @@ void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index,
.Relaxed_Store(value);
WRITE_BARRIER(object, slot_offset + kTaggedPayloadOffset, value);
#ifdef V8_COMPRESS_POINTERS
- // See gc_safe_store() for the reasons behind two stores.
+ // See gc_safe_store() for the reasons behind two stores and why the second is
+ // only done if !V8_HEAP_SANDBOX_BOOL
ObjectSlot(FIELD_ADDR(object, slot_offset + kRawPayloadOffset))
.Relaxed_Store(Smi::zero());
#endif
}
-bool EmbedderDataSlot::ToAlignedPointer(const Isolate* isolate,
+bool EmbedderDataSlot::ToAlignedPointer(IsolateRoot isolate_root,
void** out_pointer) const {
// We don't care about atomicity of access here because embedder slots
// are accessed this way only from the main thread via API during "mutator"
// phase which is properly synched with GC (concurrent marker may still look
// at the tagged part of the embedder slot but read-only access is ok).
-#ifdef V8_COMPRESS_POINTERS
- // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
- // fields (external pointers, doubles and BigInt data) are only kTaggedSize
- // aligned so we have to use unaligned pointer friendly way of accessing them
- // in order to avoid undefined behavior in C++ code.
- Address raw_value = base::ReadUnalignedValue<Address>(address());
- // We currently have to treat zero as nullptr in embedder slots.
- if (raw_value) raw_value = DecodeExternalPointer(isolate, raw_value);
+ Address raw_value;
+#ifdef V8_HEAP_SANDBOX
+ uint32_t index = base::Memory<uint32_t>(address() + kRawPayloadOffset);
+ const Isolate* isolate = Isolate::FromRootAddress(isolate_root.address());
+ raw_value = isolate->external_pointer_table().get(index) ^
+ kEmbedderDataSlotPayloadTag;
#else
- Address raw_value = *location();
+ if (COMPRESS_POINTERS_BOOL) {
+ // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
+ // fields (external pointers, doubles and BigInt data) are only kTaggedSize
+ // aligned so we have to use unaligned pointer friendly way of accessing
+ // them in order to avoid undefined behavior in C++ code.
+ raw_value = base::ReadUnalignedValue<Address>(address());
+ } else {
+ raw_value = *location();
+ }
#endif
*out_pointer = reinterpret_cast<void*>(raw_value);
return HAS_SMI_TAG(raw_value);
}
+bool EmbedderDataSlot::ToAlignedPointerSafe(IsolateRoot isolate_root,
+ void** out_pointer) const {
+#ifdef V8_HEAP_SANDBOX
+ uint32_t index = base::Memory<uint32_t>(address() + kRawPayloadOffset);
+ Address raw_value;
+ const Isolate* isolate = Isolate::FromRootAddress(isolate_root.address());
+ if (isolate->external_pointer_table().is_valid_index(index)) {
+ raw_value = isolate->external_pointer_table().get(index) ^
+ kEmbedderDataSlotPayloadTag;
+ *out_pointer = reinterpret_cast<void*>(raw_value);
+ return true;
+ }
+ return false;
+#else
+ return ToAlignedPointer(isolate_root, out_pointer);
+#endif // V8_HEAP_SANDBOX
+}
+
bool EmbedderDataSlot::store_aligned_pointer(Isolate* isolate, void* ptr) {
Address value = reinterpret_cast<Address>(ptr);
if (!HAS_SMI_TAG(value)) return false;
- // We currently have to treat zero as nullptr in embedder slots.
- if (value) value = EncodeExternalPointer(isolate, value);
- DCHECK(HAS_SMI_TAG(value));
- gc_safe_store(value);
+#ifdef V8_HEAP_SANDBOX
+ if (V8_HEAP_SANDBOX_BOOL) {
+ AllocateExternalPointerEntry(isolate);
+ // Raw payload contains the table index. Object slots don't support loading
+ // of raw values, so we just "reinterpret cast" Object value to index.
+ Object index_as_object =
+ ObjectSlot(address() + kRawPayloadOffset).Relaxed_Load();
+ uint32_t index = static_cast<uint32_t>(index_as_object.ptr());
+ isolate->external_pointer_table().set(index,
+ value ^ kEmbedderDataSlotPayloadTag);
+ return true;
+ }
+#endif
+ gc_safe_store(isolate, value);
return true;
}
@@ -109,10 +157,7 @@ EmbedderDataSlot::RawData EmbedderDataSlot::load_raw(
// fields (external pointers, doubles and BigInt data) are only kTaggedSize
// aligned so we have to use unaligned pointer friendly way of accessing them
// in order to avoid undefined behavior in C++ code.
- Address value = base::ReadUnalignedValue<Address>(address());
- // We currently have to treat zero as nullptr in embedder slots.
- if (value) return DecodeExternalPointer(isolate, value);
- return value;
+ return base::ReadUnalignedValue<EmbedderDataSlot::RawData>(address());
#else
return *location();
#endif
@@ -121,16 +166,15 @@ EmbedderDataSlot::RawData EmbedderDataSlot::load_raw(
void EmbedderDataSlot::store_raw(Isolate* isolate,
EmbedderDataSlot::RawData data,
const DisallowGarbageCollection& no_gc) {
- // We currently have to treat zero as nullptr in embedder slots.
- if (data) data = EncodeExternalPointer(isolate, data);
- gc_safe_store(data);
+ gc_safe_store(isolate, data);
}
-void EmbedderDataSlot::gc_safe_store(Address value) {
+void EmbedderDataSlot::gc_safe_store(Isolate* isolate, Address value) {
#ifdef V8_COMPRESS_POINTERS
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(SmiValuesAre31Bits());
STATIC_ASSERT(kTaggedSize == kInt32Size);
+
// We have to do two 32-bit stores here because
// 1) tagged part modifications must be atomic to be properly synchronized
// with the concurrent marker.
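Under V8_HEAP_SANDBOX the code above stops keeping the raw external pointer in the slot itself: store_aligned_pointer() allocates an entry in the isolate's external pointer table, writes the tagged pointer value there, and leaves only the 32-bit table index in the slot's raw payload; ToAlignedPointer()/ToAlignedPointerSafe() reverse that lookup. A self-contained sketch of this index-in-slot, value-in-table indirection, with a std::vector standing in for the table and an assumed tag constant; none of these names are V8 API:

#include <cstdint>
#include <vector>

// Stand-in for the external pointer table: the slot holds only an index,
// the table holds the full-width (tagged) pointer value.
class ExternalPointerTableSketch {
 public:
  uint32_t allocate() {
    entries_.push_back(0);
    return static_cast<uint32_t>(entries_.size() - 1);
  }
  void set(uint32_t index, uintptr_t value) { entries_[index] = value; }
  uintptr_t get(uint32_t index) const { return entries_[index]; }
  bool is_valid_index(uint32_t index) const { return index < entries_.size(); }

 private:
  std::vector<uintptr_t> entries_;
};

constexpr uintptr_t kPayloadTagSketch = 0x1;  // assumed tag, for illustration

// Storing: allocate a table entry, stash the tagged value there, and return
// the index that the slot's raw payload would hold.
uint32_t StoreAlignedPointerSketch(ExternalPointerTableSketch& table, void* ptr) {
  uint32_t index = table.allocate();
  table.set(index, reinterpret_cast<uintptr_t>(ptr) ^ kPayloadTagSketch);
  return index;
}

// Loading: look the index up again; the validity check mirrors the intent of
// ToAlignedPointerSafe(), which rejects indices that are not valid entries.
bool LoadAlignedPointerSketch(const ExternalPointerTableSketch& table,
                              uint32_t index, void** out) {
  if (!table.is_valid_index(index)) return false;
  *out = reinterpret_cast<void*>(table.get(index) ^ kPayloadTagSketch);
  return true;
}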
diff --git a/deps/v8/src/objects/embedder-data-slot.h b/deps/v8/src/objects/embedder-data-slot.h
index 68d71c0177..8f4fcc8af2 100644
--- a/deps/v8/src/objects/embedder-data-slot.h
+++ b/deps/v8/src/objects/embedder-data-slot.h
@@ -43,7 +43,11 @@ class EmbedderDataSlot
#endif
#ifdef V8_COMPRESS_POINTERS
- // The raw payload is located in the other tagged part of the full pointer.
+ // The raw payload is located in the other "tagged" part of the full pointer
+ // and contains the upper part of an aligned address. The raw part is not expected
+ // to look like a tagged value.
+ // When V8_HEAP_SANDBOX is defined the raw payload contains an index into the
+ // external pointer table.
static constexpr int kRawPayloadOffset = kTaggedSize - kTaggedPayloadOffset;
#endif
static constexpr int kRequiredPtrAlignment = kSmiTagSize;
@@ -51,6 +55,8 @@ class EmbedderDataSlot
// Opaque type used for storing raw embedder data.
using RawData = Address;
+ V8_INLINE void AllocateExternalPointerEntry(Isolate* isolate);
+
V8_INLINE Object load_tagged() const;
V8_INLINE void store_smi(Smi value);
@@ -66,8 +72,22 @@ class EmbedderDataSlot
// the pointer-like value. Note, that some Smis could still look like an
// aligned pointers.
// Returns true on success.
- V8_INLINE bool ToAlignedPointer(const Isolate* isolate,
- void** out_result) const;
+ // When V8 heap sandbox is enabled, calling this method when the raw part of
+ // the slot does not contain a valid external pointer table index is undefined
+ // behaviour and will most likely result in crashes.
+ V8_INLINE bool ToAlignedPointer(IsolateRoot isolate, void** out_result) const;
+
+ // Same as ToAlignedPointer() but with a workaround for V8 heap sandbox.
+ // When V8 heap sandbox is enabled, this method doesn't crash when the raw
+ // part of the slot contains "undefined" instead of a correct external table
+ // entry index (see Factory::InitializeJSObjectBody() for details).
+ // Returns true when the external pointer table index was pointing to a valid
+ // entry, otherwise false.
+ //
+ // Call this function if you are not sure whether the slot contains a valid
+ // external pointer or not.
+ V8_INLINE bool ToAlignedPointerSafe(IsolateRoot isolate,
+ void** out_result) const;
// Returns true if the pointer was successfully stored or false if the pointer
// was improperly aligned.
@@ -82,7 +102,7 @@ class EmbedderDataSlot
private:
// Stores given value to the embedder data slot in a concurrent-marker
// friendly manner (tagged part of the slot is written atomically).
- V8_INLINE void gc_safe_store(Address value);
+ V8_INLINE void gc_safe_store(Isolate* isolate, Address value);
};
} // namespace internal
diff --git a/deps/v8/src/objects/feedback-cell-inl.h b/deps/v8/src/objects/feedback-cell-inl.h
index 36d9bc8569..494a951ce4 100644
--- a/deps/v8/src/objects/feedback-cell-inl.h
+++ b/deps/v8/src/objects/feedback-cell-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/feedback-cell-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(FeedbackCell)
void FeedbackCell::clear_padding() {
@@ -55,6 +57,17 @@ void FeedbackCell::SetInterruptBudget() {
set_interrupt_budget(FLAG_interrupt_budget);
}
+void FeedbackCell::IncrementClosureCount(Isolate* isolate) {
+ ReadOnlyRoots r(isolate);
+ if (map() == r.no_closures_cell_map()) {
+ set_map(r.one_closure_cell_map());
+ } else if (map() == r.one_closure_cell_map()) {
+ set_map(r.many_closures_cell_map());
+ } else {
+ DCHECK(map() == r.many_closures_cell_map());
+ }
+}
+
} // namespace internal
} // namespace v8
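IncrementClosureCount() above does not keep an integer counter; the count is folded into which map the cell has, with only three distinguishable states (no closures, one closure, many closures) and a saturating transition. A stand-alone sketch of that state machine, using a plain enum instead of maps; names are placeholders:

// no_closures -> one_closure -> many_closures, saturating at the last state,
// just like the map transitions in FeedbackCell::IncrementClosureCount().
enum class ClosureCountStateSketch { kNoClosures, kOneClosure, kManyClosures };

ClosureCountStateSketch IncrementClosureCountSketch(ClosureCountStateSketch s) {
  switch (s) {
    case ClosureCountStateSketch::kNoClosures:
      return ClosureCountStateSketch::kOneClosure;
    case ClosureCountStateSketch::kOneClosure:
      return ClosureCountStateSketch::kManyClosures;
    case ClosureCountStateSketch::kManyClosures:
      return ClosureCountStateSketch::kManyClosures;  // saturates
  }
  return s;  // unreachable; silences "control reaches end" warnings
}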
diff --git a/deps/v8/src/objects/feedback-cell.h b/deps/v8/src/objects/feedback-cell.h
index 9728f8e8c0..19f1075e62 100644
--- a/deps/v8/src/objects/feedback-cell.h
+++ b/deps/v8/src/objects/feedback-cell.h
@@ -13,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/feedback-cell-tq.inc"
+
// This is a special cell used to maintain both the link between a
// closure and its feedback vector, as well as a way to count the
// number of closures created for a certain function per native
@@ -34,6 +36,11 @@ class FeedbackCell : public TorqueGeneratedFeedbackCell<FeedbackCell, Struct> {
inline void SetInitialInterruptBudget();
inline void SetInterruptBudget();
+ // The closure count is encoded in the cell's map, which distinguishes
+ // between zero, one, or many closures. This function records a new closure
+ // creation by updating the map.
+ inline void IncrementClosureCount(Isolate* isolate);
+
using BodyDescriptor =
FixedBodyDescriptor<kValueOffset, kInterruptBudgetOffset, kAlignedSize>;
diff --git a/deps/v8/src/objects/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h
index 6db9230ebb..2e23c35b5f 100644
--- a/deps/v8/src/objects/feedback-vector-inl.h
+++ b/deps/v8/src/objects/feedback-vector-inl.h
@@ -21,6 +21,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/feedback-vector-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(FeedbackVector)
OBJECT_CONSTRUCTORS_IMPL(FeedbackMetadata, HeapObject)
OBJECT_CONSTRUCTORS_IMPL(ClosureFeedbackCellArray, FixedArray)
@@ -33,12 +35,12 @@ CAST_ACCESSOR(ClosureFeedbackCellArray)
INT32_ACCESSORS(FeedbackMetadata, slot_count, kSlotCountOffset)
-INT32_ACCESSORS(FeedbackMetadata, closure_feedback_cell_count,
- kFeedbackCellCountOffset)
+INT32_ACCESSORS(FeedbackMetadata, create_closure_slot_count,
+ kCreateClosureSlotCountOffset)
int32_t FeedbackMetadata::synchronized_slot_count() const {
- return base::Acquire_Load(reinterpret_cast<const base::Atomic32*>(
- FIELD_ADDR(*this, kSlotCountOffset)));
+ return base::Acquire_Load(
+ reinterpret_cast<const base::Atomic32*>(field_address(kSlotCountOffset)));
}
int32_t FeedbackMetadata::get(int index) const {
@@ -98,8 +100,6 @@ Handle<FeedbackCell> ClosureFeedbackCellArray::GetFeedbackCell(int index) {
return handle(FeedbackCell::cast(get(index)), GetIsolate());
}
-void FeedbackVector::clear_padding() { set_padding(0); }
-
bool FeedbackVector::is_empty() const { return length() == 0; }
FeedbackMetadata FeedbackVector::metadata() const {
@@ -109,17 +109,30 @@ FeedbackMetadata FeedbackVector::metadata() const {
void FeedbackVector::clear_invocation_count() { set_invocation_count(0); }
Code FeedbackVector::optimized_code() const {
- MaybeObject slot = optimized_code_weak_or_smi();
- DCHECK(slot->IsSmi() || slot->IsWeakOrCleared());
+ MaybeObject slot = maybe_optimized_code();
+ DCHECK(slot->IsWeakOrCleared());
HeapObject heap_object;
- return slot->GetHeapObject(&heap_object) ? Code::cast(heap_object) : Code();
+ Code code =
+ slot->GetHeapObject(&heap_object) ? Code::cast(heap_object) : Code();
+ // It is possible that the maybe_optimized_code slot is cleared but the
+ // optimization tier hasn't been updated yet. We update the tier when we
+ // execute the function next time / when we create a new closure.
+ DCHECK_IMPLIES(!code.is_null(), OptimizationTierBits::decode(flags()) ==
+ GetTierForCodeKind(code.kind()));
+ return code;
}
OptimizationMarker FeedbackVector::optimization_marker() const {
- MaybeObject slot = optimized_code_weak_or_smi();
- Smi value;
- if (!slot->ToSmi(&value)) return OptimizationMarker::kNone;
- return static_cast<OptimizationMarker>(value.value());
+ return OptimizationMarkerBits::decode(flags());
+}
+
+OptimizationTier FeedbackVector::optimization_tier() const {
+ OptimizationTier tier = OptimizationTierBits::decode(flags());
+ // It is possible that the optimization tier bits aren't updated when the code
+ // was cleared due to a GC.
+ DCHECK_IMPLIES(tier == OptimizationTier::kNone,
+ maybe_optimized_code()->IsCleared());
+ return tier;
}
bool FeedbackVector::has_optimized_code() const {
@@ -139,13 +152,28 @@ FeedbackSlot FeedbackVector::ToSlot(intptr_t index) {
return FeedbackSlot(static_cast<int>(index));
}
+#ifdef DEBUG
+// Instead of FixedArray, the Feedback and the Extra should contain
+// WeakFixedArrays. The only allowed FixedArray subtype is HashTable.
+bool FeedbackVector::IsOfLegacyType(MaybeObject value) {
+ HeapObject heap_object;
+ if (value->GetHeapObject(&heap_object)) {
+ return heap_object.IsFixedArray() && !heap_object.IsHashTable();
+ }
+ return false;
+}
+#endif // DEBUG
+
MaybeObject FeedbackVector::Get(FeedbackSlot slot) const {
- return raw_feedback_slots(GetIndex(slot));
+ MaybeObject value = raw_feedback_slots(GetIndex(slot));
+ DCHECK(!IsOfLegacyType(value));
+ return value;
}
-MaybeObject FeedbackVector::Get(const Isolate* isolate,
- FeedbackSlot slot) const {
- return raw_feedback_slots(isolate, GetIndex(slot));
+MaybeObject FeedbackVector::Get(IsolateRoot isolate, FeedbackSlot slot) const {
+ MaybeObject value = raw_feedback_slots(isolate, GetIndex(slot));
+ DCHECK(!IsOfLegacyType(value));
+ return value;
}
Handle<FeedbackCell> FeedbackVector::GetClosureFeedbackCell(int index) const {
@@ -155,14 +183,41 @@ Handle<FeedbackCell> FeedbackVector::GetClosureFeedbackCell(int index) const {
return cell_array.GetFeedbackCell(index);
}
+MaybeObject FeedbackVector::SynchronizedGet(FeedbackSlot slot) const {
+ const int i = slot.ToInt();
+ DCHECK_LT(static_cast<unsigned>(i), static_cast<unsigned>(this->length()));
+ const int offset = kRawFeedbackSlotsOffset + i * kTaggedSize;
+ MaybeObject value = TaggedField<MaybeObject>::Acquire_Load(*this, offset);
+ DCHECK(!IsOfLegacyType(value));
+ return value;
+}
+
+void FeedbackVector::SynchronizedSet(FeedbackSlot slot, MaybeObject value,
+ WriteBarrierMode mode) {
+ DCHECK(!IsOfLegacyType(value));
+ const int i = slot.ToInt();
+ DCHECK_LT(static_cast<unsigned>(i), static_cast<unsigned>(this->length()));
+ const int offset = kRawFeedbackSlotsOffset + i * kTaggedSize;
+ TaggedField<MaybeObject>::Release_Store(*this, offset, value);
+ CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode);
+}
+
+void FeedbackVector::SynchronizedSet(FeedbackSlot slot, Object value,
+ WriteBarrierMode mode) {
+ SynchronizedSet(slot, MaybeObject::FromObject(value), mode);
+}
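The SynchronizedGet/SynchronizedSet pair above uses an acquire load and a release store so that a background thread which observes a newly written slot value also observes everything the main thread wrote before publishing it. A self-contained sketch of that ordering with std::atomic in place of the TaggedField accessors; a single publisher is assumed and all names are illustrative:

#include <atomic>
#include <cstdint>

struct PublishedSlotSketch {
  std::atomic<uintptr_t> slot{0};
  uintptr_t payload = 0;

  // Writer: fill the payload first, then publish the slot with release order.
  void Publish(uintptr_t new_payload, uintptr_t new_slot) {
    payload = new_payload;
    slot.store(new_slot, std::memory_order_release);
  }

  // Reader: an acquire load that sees |expected_slot| is guaranteed to also
  // see the payload written before the matching release store.
  bool Read(uintptr_t expected_slot, uintptr_t* out_payload) const {
    if (slot.load(std::memory_order_acquire) != expected_slot) return false;
    *out_payload = payload;
    return true;
  }
};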
+
void FeedbackVector::Set(FeedbackSlot slot, MaybeObject value,
WriteBarrierMode mode) {
+ DCHECK(!IsOfLegacyType(value));
set_raw_feedback_slots(GetIndex(slot), value, mode);
}
void FeedbackVector::Set(FeedbackSlot slot, Object value,
WriteBarrierMode mode) {
- set_raw_feedback_slots(GetIndex(slot), MaybeObject::FromObject(value), mode);
+ MaybeObject maybe_value = MaybeObject::FromObject(value);
+ DCHECK(!IsOfLegacyType(maybe_value));
+ set_raw_feedback_slots(GetIndex(slot), maybe_value, mode);
}
inline MaybeObjectSlot FeedbackVector::slots_start() {
@@ -237,7 +292,7 @@ CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
}
// Helper function to transform the feedback to ForInHint.
-ForInHint ForInHintFromFeedback(int type_feedback) {
+ForInHint ForInHintFromFeedback(ForInFeedback type_feedback) {
switch (type_feedback) {
case ForInFeedback::kNone:
return ForInHint::kNone;
@@ -255,10 +310,6 @@ Handle<Symbol> FeedbackVector::UninitializedSentinel(Isolate* isolate) {
return isolate->factory()->uninitialized_symbol();
}
-Handle<Symbol> FeedbackVector::GenericSentinel(Isolate* isolate) {
- return isolate->factory()->generic_symbol();
-}
-
Handle<Symbol> FeedbackVector::MegamorphicSentinel(Isolate* isolate) {
return isolate->factory()->megamorphic_symbol();
}
@@ -283,46 +334,91 @@ int FeedbackMetadataIterator::entry_size() const {
return FeedbackMetadata::GetSlotSize(kind());
}
-MaybeObject FeedbackNexus::GetFeedback() const {
- MaybeObject feedback = vector().Get(slot());
- FeedbackVector::AssertNoLegacyTypes(feedback);
- return feedback;
+MaybeObject NexusConfig::GetFeedback(FeedbackVector vector,
+ FeedbackSlot slot) const {
+ return vector.SynchronizedGet(slot);
}
-MaybeObject FeedbackNexus::GetFeedbackExtra() const {
-#ifdef DEBUG
- FeedbackSlotKind kind = vector().GetKind(slot());
- DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind));
-#endif
- return vector().Get(slot().WithOffset(1));
+void NexusConfig::SetFeedback(FeedbackVector vector, FeedbackSlot slot,
+ MaybeObject feedback,
+ WriteBarrierMode mode) const {
+ DCHECK(can_write());
+ vector.SynchronizedSet(slot, feedback, mode);
}
-void FeedbackNexus::SetFeedback(Object feedback, WriteBarrierMode mode) {
- SetFeedback(MaybeObject::FromObject(feedback));
+MaybeObject FeedbackNexus::UninitializedSentinel() const {
+ return MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(GetIsolate()));
}
-void FeedbackNexus::SetFeedback(MaybeObject feedback, WriteBarrierMode mode) {
- FeedbackVector::AssertNoLegacyTypes(feedback);
- vector().Set(slot(), feedback, mode);
+MaybeObject FeedbackNexus::MegamorphicSentinel() const {
+ return MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(GetIsolate()));
}
-void FeedbackNexus::SetFeedbackExtra(Object feedback_extra,
- WriteBarrierMode mode) {
-#ifdef DEBUG
- FeedbackSlotKind kind = vector().GetKind(slot());
- DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind));
- FeedbackVector::AssertNoLegacyTypes(MaybeObject::FromObject(feedback_extra));
-#endif
- vector().Set(slot().WithOffset(1), MaybeObject::FromObject(feedback_extra),
- mode);
+MaybeObject FeedbackNexus::FromHandle(MaybeObjectHandle slot) const {
+ return slot.is_null() ? HeapObjectReference::ClearedValue(config()->isolate())
+ : *slot;
}
-void FeedbackNexus::SetFeedbackExtra(MaybeObject feedback_extra,
- WriteBarrierMode mode) {
-#ifdef DEBUG
- FeedbackVector::AssertNoLegacyTypes(feedback_extra);
-#endif
- vector().Set(slot().WithOffset(1), feedback_extra, mode);
+MaybeObjectHandle FeedbackNexus::ToHandle(MaybeObject value) const {
+ return value.IsCleared() ? MaybeObjectHandle()
+ : MaybeObjectHandle(config()->NewHandle(value));
+}
+
+MaybeObject FeedbackNexus::GetFeedback() const {
+ auto pair = GetFeedbackPair();
+ return pair.first;
+}
+
+MaybeObject FeedbackNexus::GetFeedbackExtra() const {
+ auto pair = GetFeedbackPair();
+ return pair.second;
+}
+
+std::pair<MaybeObject, MaybeObject> FeedbackNexus::GetFeedbackPair() const {
+ if (config()->mode() == NexusConfig::BackgroundThread &&
+ feedback_cache_.has_value()) {
+ return std::make_pair(FromHandle(feedback_cache_->first),
+ FromHandle(feedback_cache_->second));
+ }
+ auto pair = FeedbackMetadata::GetSlotSize(kind()) == 2
+ ? config()->GetFeedbackPair(vector(), slot())
+ : std::make_pair(config()->GetFeedback(vector(), slot()),
+ MaybeObject());
+ if (config()->mode() == NexusConfig::BackgroundThread &&
+ !feedback_cache_.has_value()) {
+ feedback_cache_ =
+ std::make_pair(ToHandle(pair.first), ToHandle(pair.second));
+ }
+ return pair;
+}
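GetFeedbackPair() above memoizes its first successful read when the nexus is used from a background thread, so repeated queries see one consistent snapshot instead of re-reading slots that the main thread may be mutating. A generic sketch of that memoization shape, with std::optional standing in for the handle cache and ints for the feedback values; placeholders throughout:

#include <optional>
#include <utility>

class PairReaderSketch {
 public:
  explicit PairReaderSketch(bool background) : background_(background) {}

  std::pair<int, int> GetPair() {
    // On the background thread, return the cached snapshot if there is one.
    if (background_ && cache_.has_value()) return *cache_;
    std::pair<int, int> pair = ReadFromStorage();
    // Cache the first read so later calls observe the same values.
    if (background_ && !cache_.has_value()) cache_ = pair;
    return pair;
  }

 private:
  std::pair<int, int> ReadFromStorage() { return {1, 2}; }  // placeholder read
  bool background_;
  std::optional<std::pair<int, int>> cache_;
};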
+
+template <typename T>
+struct IsValidFeedbackType
+ : public std::integral_constant<bool,
+ std::is_base_of<MaybeObject, T>::value ||
+ std::is_base_of<Object, T>::value> {};
+
+template <typename FeedbackType>
+void FeedbackNexus::SetFeedback(FeedbackType feedback, WriteBarrierMode mode) {
+ static_assert(IsValidFeedbackType<FeedbackType>(),
+ "feedbacks need to be Smi, Object or MaybeObject");
+ MaybeObject fmo = MaybeObject::Create(feedback);
+ config()->SetFeedback(vector(), slot(), fmo, mode);
+}
+
+template <typename FeedbackType, typename FeedbackExtraType>
+void FeedbackNexus::SetFeedback(FeedbackType feedback, WriteBarrierMode mode,
+ FeedbackExtraType feedback_extra,
+ WriteBarrierMode mode_extra) {
+ static_assert(IsValidFeedbackType<FeedbackType>(),
+ "feedbacks need to be Smi, Object or MaybeObject");
+ static_assert(IsValidFeedbackType<FeedbackExtraType>(),
+ "feedbacks need to be Smi, Object or MaybeObject");
+ MaybeObject fmo = MaybeObject::Create(feedback);
+ MaybeObject fmo_extra = MaybeObject::Create(feedback_extra);
+ config()->SetFeedbackPair(vector(), slot(), fmo, mode, fmo_extra, mode_extra);
}
Isolate* FeedbackNexus::GetIsolate() const { return vector().GetIsolate(); }
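The templated SetFeedback() overloads above gate their arguments with IsValidFeedbackType, an integral_constant built from std::is_base_of, so passing anything that is not an Object or MaybeObject fails at compile time. The same trait pattern in isolation, with two placeholder base types instead of V8's tagged types:

#include <type_traits>

struct BaseASketch {};
struct BaseBSketch {};
struct DerivedFromASketch : BaseASketch {};

// Accept only types derived from one of the two allowed bases.
template <typename T>
struct IsAllowedSketch
    : std::integral_constant<bool, std::is_base_of<BaseASketch, T>::value ||
                                       std::is_base_of<BaseBSketch, T>::value> {
};

template <typename T>
void StoreFeedbackSketch(T /*value*/) {
  static_assert(IsAllowedSketch<T>::value,
                "value must derive from BaseASketch or BaseBSketch");
  // ... write the value into the slot ...
}

// StoreFeedbackSketch(DerivedFromASketch{});  // compiles
// StoreFeedbackSketch(42);                    // rejected by the static_assert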
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index d48cd7eb28..7c30e1a045 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -5,6 +5,7 @@
#include "src/objects/feedback-vector.h"
#include "src/diagnostics/code-tracer.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/local-factory-inl.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/ic/ic-inl.h"
@@ -19,7 +20,7 @@ namespace v8 {
namespace internal {
FeedbackSlot FeedbackVectorSpec::AddSlot(FeedbackSlotKind kind) {
- int slot = slots();
+ int slot = slot_count();
int entries_per_slot = FeedbackMetadata::GetSlotSize(kind);
append(kind);
for (int i = 1; i < entries_per_slot; i++) {
@@ -38,9 +39,7 @@ FeedbackSlot FeedbackVectorSpec::AddTypeProfileSlot() {
bool FeedbackVectorSpec::HasTypeProfileSlot() const {
FeedbackSlot slot =
FeedbackVector::ToSlot(FeedbackVectorSpec::kTypeProfileSlotIndex);
- if (slots() <= slot.ToInt()) {
- return false;
- }
+ if (slot_count() <= slot.ToInt()) return false;
return GetKind(slot) == FeedbackSlotKind::kTypeProfile;
}
@@ -81,10 +80,10 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(LocalIsolate* isolate,
const FeedbackVectorSpec* spec) {
auto* factory = isolate->factory();
- const int slot_count = spec == nullptr ? 0 : spec->slots();
- const int closure_feedback_cell_count =
- spec == nullptr ? 0 : spec->closure_feedback_cells();
- if (slot_count == 0 && closure_feedback_cell_count == 0) {
+ const int slot_count = spec == nullptr ? 0 : spec->slot_count();
+ const int create_closure_slot_count =
+ spec == nullptr ? 0 : spec->create_closure_slot_count();
+ if (slot_count == 0 && create_closure_slot_count == 0) {
return factory->empty_feedback_metadata();
}
#ifdef DEBUG
@@ -101,7 +100,7 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(LocalIsolate* isolate,
#endif
Handle<FeedbackMetadata> metadata =
- factory->NewFeedbackMetadata(slot_count, closure_feedback_cell_count);
+ factory->NewFeedbackMetadata(slot_count, create_closure_slot_count);
// Initialize the slots. The raw data section has already been pre-zeroed in
// NewFeedbackMetadata.
@@ -122,7 +121,7 @@ template Handle<FeedbackMetadata> FeedbackMetadata::New(
bool FeedbackMetadata::SpecDiffersFrom(
const FeedbackVectorSpec* other_spec) const {
- if (other_spec->slots() != slot_count()) {
+ if (other_spec->slot_count() != slot_count()) {
return true;
}
@@ -220,7 +219,7 @@ Handle<ClosureFeedbackCellArray> ClosureFeedbackCellArray::New(
Factory* factory = isolate->factory();
int num_feedback_cells =
- shared->feedback_metadata().closure_feedback_cell_count();
+ shared->feedback_metadata().create_closure_slot_count();
Handle<ClosureFeedbackCellArray> feedback_cell_array =
factory->NewClosureFeedbackCellArray(num_feedback_cells);
@@ -251,16 +250,18 @@ Handle<FeedbackVector> FeedbackVector::New(
DCHECK_EQ(vector->length(), slot_count);
DCHECK_EQ(vector->shared_function_info(), *shared);
- DCHECK_EQ(
- vector->optimized_code_weak_or_smi(),
- MaybeObject::FromSmi(Smi::FromEnum(
- FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
- : OptimizationMarker::kNone)));
+ DCHECK_EQ(vector->optimization_marker(),
+ FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
+ : OptimizationMarker::kNone);
+ // TODO(mythria): This might change if NCI code is installed on feedback
+ // vector. Update this accordingly.
+ DCHECK_EQ(vector->optimization_tier(), OptimizationTier::kNone);
DCHECK_EQ(vector->invocation_count(), 0);
DCHECK_EQ(vector->profiler_ticks(), 0);
+ DCHECK(vector->maybe_optimized_code()->IsCleared());
// Ensure we can skip the write barrier
- Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
+ Handle<Symbol> uninitialized_sentinel = UninitializedSentinel(isolate);
DCHECK_EQ(ReadOnlyRoots(isolate).uninitialized_symbol(),
*uninitialized_sentinel);
for (int i = 0; i < slot_count;) {
@@ -268,7 +269,7 @@ Handle<FeedbackVector> FeedbackVector::New(
FeedbackSlotKind kind = feedback_metadata->GetKind(slot);
int entry_size = FeedbackMetadata::GetSlotSize(kind);
- Object extra_value = *uninitialized_sentinel;
+ MaybeObject extra_value = MaybeObject::FromObject(*uninitialized_sentinel);
switch (kind) {
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
@@ -287,7 +288,7 @@ Handle<FeedbackVector> FeedbackVector::New(
break;
case FeedbackSlotKind::kCall:
vector->Set(slot, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
- extra_value = Smi::zero();
+ extra_value = MaybeObject::FromObject(Smi::zero());
break;
case FeedbackSlotKind::kCloneObject:
case FeedbackSlotKind::kLoadProperty:
@@ -382,32 +383,62 @@ void FeedbackVector::SaturatingIncrementProfilerTicks() {
void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
Handle<Code> code) {
DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
- vector->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*code));
+ // We should only set optimized code when there is no valid optimized
+ // code or we are tiering up.
+ DCHECK(!vector->has_optimized_code() ||
+ vector->optimized_code().marked_for_deoptimization() ||
+ (vector->optimized_code().kind() == CodeKind::TURBOPROP &&
+ code->kind() == CodeKind::TURBOFAN));
+ // TODO(mythria): We could see a CompileOptimized marker here either from
+ // tests that use %OptimizeFunctionOnNextCall or because we re-mark the
+ // function for non-concurrent optimization after an OSR. We should avoid
+ // these cases and also check that marker isn't kCompileOptimized.
+ DCHECK(vector->optimization_marker() !=
+ OptimizationMarker::kCompileOptimizedConcurrent);
+ vector->set_maybe_optimized_code(HeapObjectReference::Weak(*code));
+ int32_t state = vector->flags();
+ state = OptimizationTierBits::update(state, GetTierForCodeKind(code->kind()));
+ state = OptimizationMarkerBits::update(state, OptimizationMarker::kNone);
+ vector->set_flags(state);
}
void FeedbackVector::ClearOptimizedCode() {
DCHECK(has_optimized_code());
- SetOptimizationMarker(OptimizationMarker::kNone);
+ DCHECK_NE(optimization_tier(), OptimizationTier::kNone);
+ set_maybe_optimized_code(HeapObjectReference::ClearedValue(GetIsolate()));
+ ClearOptimizationTier();
}
void FeedbackVector::ClearOptimizationMarker() {
- DCHECK(!has_optimized_code());
SetOptimizationMarker(OptimizationMarker::kNone);
}
void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
- set_optimized_code_weak_or_smi(MaybeObject::FromSmi(Smi::FromEnum(marker)));
+ int32_t state = flags();
+ state = OptimizationMarkerBits::update(state, marker);
+ set_flags(state);
+}
+
+void FeedbackVector::ClearOptimizationTier() {
+ int32_t state = flags();
+ state = OptimizationTierBits::update(state, OptimizationTier::kNone);
+ set_flags(state);
+}
+
+void FeedbackVector::InitializeOptimizationState() {
+ int32_t state = 0;
+ state = OptimizationMarkerBits::update(
+ state, FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
+ : OptimizationMarker::kNone);
+ state = OptimizationTierBits::update(state, OptimizationTier::kNone);
+ set_flags(state);
}
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
SharedFunctionInfo shared, const char* reason) {
- MaybeObject slot = optimized_code_weak_or_smi();
- if (slot->IsSmi()) {
- return;
- }
-
+ MaybeObject slot = maybe_optimized_code();
if (slot->IsCleared()) {
- ClearOptimizationMarker();
+ ClearOptimizationTier();
return;
}
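SetOptimizedCode(), SetOptimizationMarker() and the related helpers above pack the optimization marker and tier into the vector's flags() word through the OptimizationMarkerBits/OptimizationTierBits fields and their encode/decode/update operations. A stand-alone sketch of that bit-field pattern; the field widths, positions and enum values below are assumptions chosen purely for illustration:

#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
  // Clears the field in |word| and re-encodes |value| into it.
  static constexpr uint32_t update(uint32_t word, T value) {
    return (word & ~kMask) | encode(value);
  }
};

enum class MarkerSketch : uint32_t { kNone = 0, kCompileOptimized = 1 };
enum class TierSketch : uint32_t { kNone = 0, kMidTier = 1, kTopTier = 2 };

using MarkerBitsSketch = BitFieldSketch<MarkerSketch, 0, 3>;
using TierBitsSketch = BitFieldSketch<TierSketch, 3, 2>;

// Mirrors the shape of SetOptimizedCode(): read flags, update both fields,
// write flags back.
//   uint32_t flags = 0;
//   flags = TierBitsSketch::update(flags, TierSketch::kTopTier);
//   flags = MarkerBitsSketch::update(flags, MarkerSketch::kNone);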
@@ -440,20 +471,77 @@ bool FeedbackVector::ClearSlots(Isolate* isolate) {
return feedback_updated;
}
-void FeedbackVector::AssertNoLegacyTypes(MaybeObject object) {
-#ifdef DEBUG
- HeapObject heap_object;
- if (object->GetHeapObject(&heap_object)) {
- // Instead of FixedArray, the Feedback and the Extra should contain
- // WeakFixedArrays. The only allowed FixedArray subtype is HashTable.
- DCHECK_IMPLIES(heap_object.IsFixedArray(), heap_object.IsHashTable());
+MaybeObjectHandle NexusConfig::NewHandle(MaybeObject object) const {
+ if (mode() == Mode::MainThread) {
+ return handle(object, isolate_);
}
-#endif
+ DCHECK_EQ(mode(), Mode::BackgroundThread);
+ return handle(object, local_heap_);
+}
+
+template <typename T>
+Handle<T> NexusConfig::NewHandle(T object) const {
+ if (mode() == Mode::MainThread) {
+ return handle(object, isolate_);
+ }
+ DCHECK_EQ(mode(), Mode::BackgroundThread);
+ return handle(object, local_heap_);
+}
+
+void NexusConfig::SetFeedbackPair(FeedbackVector vector,
+ FeedbackSlot start_slot, MaybeObject feedback,
+ WriteBarrierMode mode,
+ MaybeObject feedback_extra,
+ WriteBarrierMode mode_extra) const {
+ CHECK(can_write());
+ CHECK_GT(vector.length(), start_slot.WithOffset(1).ToInt());
+ base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
+ isolate()->feedback_vector_access());
+ vector.Set(start_slot, feedback, mode);
+ vector.Set(start_slot.WithOffset(1), feedback_extra, mode_extra);
+}
+
+std::pair<MaybeObject, MaybeObject> NexusConfig::GetFeedbackPair(
+ FeedbackVector vector, FeedbackSlot slot) const {
+ if (mode() == BackgroundThread) {
+ isolate()->feedback_vector_access()->LockShared();
+ }
+ MaybeObject feedback = vector.Get(slot);
+ MaybeObject feedback_extra = vector.Get(slot.WithOffset(1));
+ auto return_value = std::make_pair(feedback, feedback_extra);
+ if (mode() == BackgroundThread) {
+ isolate()->feedback_vector_access()->UnlockShared();
+ }
+ return return_value;
+}
+
+FeedbackNexus::FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : vector_handle_(vector),
+ slot_(slot),
+ config_(NexusConfig::FromMainThread(
+ vector.is_null() ? nullptr : vector->GetIsolate())) {
+ kind_ = vector.is_null() ? FeedbackSlotKind::kInvalid : vector->GetKind(slot);
+}
+
+FeedbackNexus::FeedbackNexus(FeedbackVector vector, FeedbackSlot slot)
+ : vector_(vector),
+ slot_(slot),
+ config_(NexusConfig::FromMainThread(
+ vector.is_null() ? nullptr : vector.GetIsolate())) {
+ kind_ = vector.is_null() ? FeedbackSlotKind::kInvalid : vector.GetKind(slot);
}
+FeedbackNexus::FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot,
+ const NexusConfig& config)
+ : vector_handle_(vector),
+ slot_(slot),
+ kind_(vector->GetKind(slot)),
+ config_(config) {}
+
Handle<WeakFixedArray> FeedbackNexus::CreateArrayOfSize(int length) {
- Isolate* isolate = GetIsolate();
- Handle<WeakFixedArray> array = isolate->factory()->NewWeakFixedArray(length);
+ DCHECK(config()->can_write());
+ Handle<WeakFixedArray> array =
+ GetIsolate()->factory()->NewWeakFixedArray(length);
return array;
}
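NexusConfig::SetFeedbackPair() above takes the isolate's feedback_vector_access() mutex exclusively so that a pair of slots is updated atomically with respect to readers, while GetFeedbackPair() takes it shared when running on a background thread (the main thread reads without locking). A self-contained sketch of that reader/writer discipline with std::shared_mutex; here the shared lock is always taken for simplicity, and all names are placeholders:

#include <mutex>
#include <shared_mutex>
#include <utility>

struct FeedbackPairStoreSketch {
  int first = 0;
  int second = 0;
  mutable std::shared_mutex mutex;

  // Writer: exclusive lock, so readers never see a half-updated pair.
  void SetPair(int a, int b) {
    std::unique_lock<std::shared_mutex> guard(mutex);
    first = a;
    second = b;
  }

  // Reader: shared lock, so concurrent readers do not block each other.
  std::pair<int, int> GetPair() const {
    std::shared_lock<std::shared_mutex> guard(mutex);
    return {first, second};
  }
};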
@@ -465,21 +553,18 @@ void FeedbackNexus::ConfigureUninitialized() {
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
SetFeedback(HeapObjectReference::ClearedValue(isolate),
+ SKIP_WRITE_BARRIER, UninitializedSentinel(),
SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
break;
}
case FeedbackSlotKind::kCloneObject:
case FeedbackSlotKind::kCall: {
- SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
+ SetFeedback(UninitializedSentinel(), SKIP_WRITE_BARRIER, Smi::zero(),
SKIP_WRITE_BARRIER);
- SetFeedbackExtra(Smi::zero(), SKIP_WRITE_BARRIER);
break;
}
case FeedbackSlotKind::kInstanceOf: {
- SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
+ SetFeedback(UninitializedSentinel(), SKIP_WRITE_BARRIER);
break;
}
case FeedbackSlotKind::kStoreNamedSloppy:
@@ -492,10 +577,8 @@ void FeedbackNexus::ConfigureUninitialized() {
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kHasKeyed:
case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
- SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
+ SetFeedback(UninitializedSentinel(), SKIP_WRITE_BARRIER,
+ UninitializedSentinel(), SKIP_WRITE_BARRIER);
break;
}
default:
@@ -555,11 +638,10 @@ bool FeedbackNexus::Clear() {
bool FeedbackNexus::ConfigureMegamorphic() {
DisallowHeapAllocation no_gc;
Isolate* isolate = GetIsolate();
- MaybeObject sentinel =
- MaybeObject::FromObject(*FeedbackVector::MegamorphicSentinel(isolate));
+ MaybeObject sentinel = MegamorphicSentinel();
if (GetFeedback() != sentinel) {
- SetFeedback(sentinel, SKIP_WRITE_BARRIER);
- SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate));
+ SetFeedback(sentinel, SKIP_WRITE_BARRIER,
+ HeapObjectReference::ClearedValue(isolate));
return true;
}
@@ -568,21 +650,17 @@ bool FeedbackNexus::ConfigureMegamorphic() {
bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
- bool changed = false;
- MaybeObject sentinel =
- MaybeObject::FromObject(*FeedbackVector::MegamorphicSentinel(isolate));
- if (GetFeedback() != sentinel) {
- SetFeedback(sentinel, SKIP_WRITE_BARRIER);
- changed = true;
- }
-
- Smi extra = Smi::FromInt(static_cast<int>(property_type));
- if (changed || GetFeedbackExtra() != MaybeObject::FromSmi(extra)) {
- SetFeedbackExtra(extra, SKIP_WRITE_BARRIER);
- changed = true;
+ MaybeObject sentinel = MegamorphicSentinel();
+ MaybeObject maybe_extra =
+ MaybeObject::FromSmi(Smi::FromInt(static_cast<int>(property_type)));
+
+ auto feedback = GetFeedbackPair();
+ bool update_required =
+ feedback.first != sentinel || feedback.second != maybe_extra;
+ if (update_required) {
+ SetFeedback(sentinel, SKIP_WRITE_BARRIER, maybe_extra, SKIP_WRITE_BARRIER);
}
- return changed;
+ return update_required;
}
Map FeedbackNexus::GetFirstMap() const {
@@ -595,8 +673,8 @@ Map FeedbackNexus::GetFirstMap() const {
}
InlineCacheState FeedbackNexus::ic_state() const {
- Isolate* isolate = GetIsolate();
- MaybeObject feedback = GetFeedback();
+ MaybeObject feedback, extra;
+ std::tie(feedback, extra) = GetFeedbackPair();
switch (kind()) {
case FeedbackSlotKind::kLiteral:
@@ -610,10 +688,7 @@ InlineCacheState FeedbackNexus::ic_state() const {
if (feedback->IsSmi()) return MONOMORPHIC;
DCHECK(feedback->IsWeakOrCleared());
- MaybeObject extra = GetFeedbackExtra();
- if (!feedback->IsCleared() ||
- extra != MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (!feedback->IsCleared() || extra != UninitializedSentinel()) {
return MONOMORPHIC;
}
return UNINITIALIZED;
@@ -628,12 +703,10 @@ InlineCacheState FeedbackNexus::ic_state() const {
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kHasKeyed: {
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return UNINITIALIZED;
}
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::MegamorphicSentinel(isolate))) {
+ if (feedback == MegamorphicSentinel()) {
return MEGAMORPHIC;
}
if (feedback->IsWeakOrCleared()) {
@@ -650,8 +723,8 @@ InlineCacheState FeedbackNexus::ic_state() const {
if (heap_object.IsName()) {
DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
IsKeyedHasICKind(kind()));
- Object extra = GetFeedbackExtra()->GetHeapObjectAssumeStrong();
- WeakFixedArray extra_array = WeakFixedArray::cast(extra);
+ Object extra_object = extra->GetHeapObjectAssumeStrong();
+ WeakFixedArray extra_array = WeakFixedArray::cast(extra_object);
return extra_array.length() > 2 ? POLYMORPHIC : MONOMORPHIC;
}
}
@@ -659,8 +732,7 @@ InlineCacheState FeedbackNexus::ic_state() const {
}
case FeedbackSlotKind::kCall: {
HeapObject heap_object;
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::MegamorphicSentinel(isolate))) {
+ if (feedback == MegamorphicSentinel()) {
return GENERIC;
} else if (feedback->IsWeakOrCleared()) {
if (feedback->GetHeapObjectIfWeak(&heap_object)) {
@@ -675,8 +747,7 @@ InlineCacheState FeedbackNexus::ic_state() const {
return MONOMORPHIC;
}
- CHECK_EQ(feedback, MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate)));
+ CHECK_EQ(feedback, UninitializedSentinel());
return UNINITIALIZED;
}
case FeedbackSlotKind::kBinaryOp: {
@@ -709,19 +780,15 @@ InlineCacheState FeedbackNexus::ic_state() const {
return MONOMORPHIC;
}
case FeedbackSlotKind::kInstanceOf: {
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return UNINITIALIZED;
- } else if (feedback ==
- MaybeObject::FromObject(
- *FeedbackVector::MegamorphicSentinel(isolate))) {
+ } else if (feedback == MegamorphicSentinel()) {
return MEGAMORPHIC;
}
return MONOMORPHIC;
}
case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return UNINITIALIZED;
} else if (feedback->IsWeakOrCleared()) {
// Don't check if the map is cleared.
@@ -731,20 +798,17 @@ InlineCacheState FeedbackNexus::ic_state() const {
return MEGAMORPHIC;
}
case FeedbackSlotKind::kTypeProfile: {
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return UNINITIALIZED;
}
return MONOMORPHIC;
}
case FeedbackSlotKind::kCloneObject: {
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return UNINITIALIZED;
}
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::MegamorphicSentinel(isolate))) {
+ if (feedback == MegamorphicSentinel()) {
return MEGAMORPHIC;
}
if (feedback->IsWeakOrCleared()) {
@@ -764,10 +828,8 @@ InlineCacheState FeedbackNexus::ic_state() const {
void FeedbackNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
DCHECK(IsGlobalICKind(kind()));
- Isolate* isolate = GetIsolate();
- SetFeedback(HeapObjectReference::Weak(*cell));
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
+ SetFeedback(HeapObjectReference::Weak(*cell), UPDATE_WRITE_BARRIER,
+ UninitializedSentinel(), SKIP_WRITE_BARRIER);
}
bool FeedbackNexus::ConfigureLexicalVarMode(int script_context_index,
@@ -785,22 +847,21 @@ bool FeedbackNexus::ConfigureLexicalVarMode(int script_context_index,
SlotIndexBits::encode(context_slot_index) |
ImmutabilityBit::encode(immutable);
- SetFeedback(Smi::From31BitPattern(config));
- Isolate* isolate = GetIsolate();
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
+ SetFeedback(Smi::From31BitPattern(config), SKIP_WRITE_BARRIER,
+ UninitializedSentinel(), SKIP_WRITE_BARRIER);
return true;
}
void FeedbackNexus::ConfigureHandlerMode(const MaybeObjectHandle& handler) {
DCHECK(IsGlobalICKind(kind()));
DCHECK(IC::IsHandler(*handler));
- SetFeedback(HeapObjectReference::ClearedValue(GetIsolate()));
- SetFeedbackExtra(*handler);
+ SetFeedback(HeapObjectReference::ClearedValue(GetIsolate()),
+ UPDATE_WRITE_BARRIER, *handler, UPDATE_WRITE_BARRIER);
}
void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
Handle<Map> result_map) {
+ DCHECK(config()->can_write());
Isolate* isolate = GetIsolate();
Handle<HeapObject> feedback;
{
@@ -814,14 +875,14 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
switch (ic_state()) {
case UNINITIALIZED:
// Cache the first map seen which meets the fast case requirements.
- SetFeedback(HeapObjectReference::Weak(*source_map));
- SetFeedbackExtra(*result_map);
+ SetFeedback(HeapObjectReference::Weak(*source_map), UPDATE_WRITE_BARRIER,
+ *result_map);
break;
case MONOMORPHIC:
if (feedback.is_null() || feedback.is_identical_to(source_map) ||
Map::cast(*feedback).is_deprecated()) {
- SetFeedback(HeapObjectReference::Weak(*source_map));
- SetFeedbackExtra(*result_map);
+ SetFeedback(HeapObjectReference::Weak(*source_map),
+ UPDATE_WRITE_BARRIER, *result_map);
} else {
// Transition to POLYMORPHIC.
Handle<WeakFixedArray> array =
@@ -830,8 +891,8 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
array->Set(1, GetFeedbackExtra());
array->Set(2, HeapObjectReference::Weak(*source_map));
array->Set(3, MaybeObject::FromObject(*result_map));
- SetFeedback(*array);
- SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate));
+ SetFeedback(*array, UPDATE_WRITE_BARRIER,
+ HeapObjectReference::ClearedValue(isolate));
}
break;
case POLYMORPHIC: {
@@ -852,10 +913,9 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
if (i >= array->length()) {
if (i == kMaxElements) {
// Transition to MEGAMORPHIC.
- MaybeObject sentinel = MaybeObject::FromObject(
- *FeedbackVector::MegamorphicSentinel(isolate));
- SetFeedback(sentinel, SKIP_WRITE_BARRIER);
- SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate));
+ MaybeObject sentinel = MegamorphicSentinel();
+ SetFeedback(sentinel, SKIP_WRITE_BARRIER,
+ HeapObjectReference::ClearedValue(isolate));
break;
}
@@ -896,7 +956,10 @@ void FeedbackNexus::SetSpeculationMode(SpeculationMode mode) {
uint32_t count = static_cast<uint32_t>(Smi::ToInt(call_count));
uint32_t value = CallCountField::encode(CallCountField::decode(count));
int result = static_cast<int>(value | SpeculationModeField::encode(mode));
- SetFeedbackExtra(Smi::FromInt(result), SKIP_WRITE_BARRIER);
+ MaybeObject feedback = GetFeedback();
+ // We can skip the write barrier for {feedback} because it's not changing.
+ SetFeedback(feedback, SKIP_WRITE_BARRIER, Smi::FromInt(result),
+ SKIP_WRITE_BARRIER);
}
SpeculationMode FeedbackNexus::GetSpeculationMode() {
@@ -924,18 +987,17 @@ void FeedbackNexus::ConfigureMonomorphic(Handle<Name> name,
const MaybeObjectHandle& handler) {
DCHECK(handler.is_null() || IC::IsHandler(*handler));
if (kind() == FeedbackSlotKind::kStoreDataPropertyInLiteral) {
- SetFeedback(HeapObjectReference::Weak(*receiver_map));
- SetFeedbackExtra(*name);
+ SetFeedback(HeapObjectReference::Weak(*receiver_map), UPDATE_WRITE_BARRIER,
+ *name);
} else {
if (name.is_null()) {
- SetFeedback(HeapObjectReference::Weak(*receiver_map));
- SetFeedbackExtra(*handler);
+ SetFeedback(HeapObjectReference::Weak(*receiver_map),
+ UPDATE_WRITE_BARRIER, *handler);
} else {
Handle<WeakFixedArray> array = CreateArrayOfSize(2);
- SetFeedback(*name);
array->Set(0, HeapObjectReference::Weak(*receiver_map));
array->Set(1, *handler);
- SetFeedbackExtra(*array);
+ SetFeedback(*name, UPDATE_WRITE_BARRIER, *array);
}
}
}
@@ -953,22 +1015,20 @@ void FeedbackNexus::ConfigurePolymorphic(
DCHECK(IC::IsHandler(*handler));
array->Set(current * 2 + 1, *handler);
}
+
if (name.is_null()) {
- SetFeedback(*array);
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
+ SetFeedback(*array, UPDATE_WRITE_BARRIER, UninitializedSentinel(),
+ SKIP_WRITE_BARRIER);
} else {
- SetFeedback(*name);
- SetFeedbackExtra(*array);
+ SetFeedback(*name, UPDATE_WRITE_BARRIER, *array);
}
}
int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
int found = 0;
for (FeedbackIterator it(this); !it.done(); it.Advance()) {
- maps->push_back(handle(it.map(), isolate));
+ maps->push_back(config()->NewHandle(it.map()));
found++;
}
@@ -978,16 +1038,15 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
int FeedbackNexus::ExtractMapsAndFeedback(
std::vector<MapAndFeedback>* maps_and_feedback) const {
DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
int found = 0;
for (FeedbackIterator it(this); !it.done(); it.Advance()) {
- Handle<Map> map = handle(it.map(), isolate);
+ Handle<Map> map = config()->NewHandle(it.map());
MaybeObject maybe_handler = it.handler();
if (!maybe_handler->IsCleared()) {
DCHECK(IC::IsHandler(maybe_handler) ||
IsStoreDataPropertyInLiteralKind(kind()));
- MaybeObjectHandle handler = handle(maybe_handler, isolate);
+ MaybeObjectHandle handler = config()->NewHandle(maybe_handler);
maps_and_feedback->push_back(MapAndHandler(map, handler));
found++;
}
@@ -998,20 +1057,18 @@ int FeedbackNexus::ExtractMapsAndFeedback(
int FeedbackNexus::ExtractMapsAndHandlers(
std::vector<MapAndHandler>* maps_and_handlers,
- bool try_update_deprecated) const {
+ TryUpdateHandler map_handler) const {
DCHECK(!IsStoreDataPropertyInLiteralKind(kind()));
DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
int found = 0;
for (FeedbackIterator it(this); !it.done(); it.Advance()) {
- Handle<Map> map = handle(it.map(), isolate);
+ Handle<Map> map = config()->NewHandle(it.map());
MaybeObject maybe_handler = it.handler();
if (!maybe_handler->IsCleared()) {
DCHECK(IC::IsHandler(maybe_handler));
- MaybeObjectHandle handler = handle(maybe_handler, isolate);
- if (try_update_deprecated &&
- !Map::TryUpdate(isolate, map).ToHandle(&map)) {
+ MaybeObjectHandle handler = config()->NewHandle(maybe_handler);
+ if (map_handler && !(map_handler(map).ToHandle(&map))) {
continue;
}
maps_and_handlers->push_back(MapAndHandler(map, handler));
@@ -1027,7 +1084,7 @@ MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
for (FeedbackIterator it(this); !it.done(); it.Advance()) {
if (it.map() == *map && !it.handler()->IsCleared()) {
- return handle(it.handler(), GetIsolate());
+ return config()->NewHandle(it.handler());
}
}
return MaybeObjectHandle();
@@ -1174,14 +1231,14 @@ IcCheckType FeedbackNexus::GetKeyType() const {
DCHECK(IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()) ||
IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()) ||
IsStoreDataPropertyInLiteralKind(kind()));
- MaybeObject feedback = GetFeedback();
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::MegamorphicSentinel(GetIsolate()))) {
+ auto pair = GetFeedbackPair();
+ MaybeObject feedback = pair.first;
+ if (feedback == MegamorphicSentinel()) {
return static_cast<IcCheckType>(
- Smi::ToInt(GetFeedbackExtra()->cast<Object>()));
+ Smi::ToInt(pair.second->template cast<Object>()));
}
MaybeObject maybe_name =
- IsStoreDataPropertyInLiteralKind(kind()) ? GetFeedbackExtra() : feedback;
+ IsStoreDataPropertyInLiteralKind(kind()) ? pair.second : feedback;
return IsPropertyNameFeedback(maybe_name) ? PROPERTY : ELEMENT;
}
@@ -1200,16 +1257,15 @@ CompareOperationHint FeedbackNexus::GetCompareOperationFeedback() const {
ForInHint FeedbackNexus::GetForInFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kForIn);
int feedback = GetFeedback().ToSmi().value();
- return ForInHintFromFeedback(feedback);
+ return ForInHintFromFeedback(static_cast<ForInFeedback>(feedback));
}
MaybeHandle<JSObject> FeedbackNexus::GetConstructorFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kInstanceOf);
- Isolate* isolate = GetIsolate();
MaybeObject feedback = GetFeedback();
HeapObject heap_object;
if (feedback->GetHeapObjectIfWeak(&heap_object)) {
- return handle(JSObject::cast(heap_object), isolate);
+ return config()->NewHandle(JSObject::cast(heap_object));
}
return MaybeHandle<JSObject>();
}
@@ -1230,6 +1286,7 @@ bool InList(Handle<ArrayList> types, Handle<String> type) {
void FeedbackNexus::Collect(Handle<String> type, int position) {
DCHECK(IsTypeProfileKind(kind()));
DCHECK_GE(position, 0);
+ DCHECK(config()->can_write());
Isolate* isolate = GetIsolate();
MaybeObject const feedback = GetFeedback();
@@ -1237,8 +1294,7 @@ void FeedbackNexus::Collect(Handle<String> type, int position) {
// Map source position to collection of types
Handle<SimpleNumberDictionary> types;
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
types = SimpleNumberDictionary::New(isolate, 1);
} else {
types = handle(
@@ -1274,8 +1330,7 @@ std::vector<int> FeedbackNexus::GetSourcePositions() const {
MaybeObject const feedback = GetFeedback();
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return source_positions;
}
@@ -1302,8 +1357,7 @@ std::vector<Handle<String>> FeedbackNexus::GetTypesForSourcePositions(
MaybeObject const feedback = GetFeedback();
std::vector<Handle<String>> types_for_position;
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return types_for_position;
}
@@ -1361,8 +1415,7 @@ JSObject FeedbackNexus::GetTypeProfile() const {
MaybeObject const feedback = GetFeedback();
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return *isolate->factory()->NewJSObject(isolate->object_function());
}
@@ -1374,7 +1427,7 @@ JSObject FeedbackNexus::GetTypeProfile() const {
void FeedbackNexus::ResetTypeProfile() {
DCHECK(IsTypeProfileKind(kind()));
- SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate()));
+ SetFeedback(UninitializedSentinel());
}
FeedbackIterator::FeedbackIterator(const FeedbackNexus* nexus)
@@ -1387,8 +1440,8 @@ FeedbackIterator::FeedbackIterator(const FeedbackNexus* nexus)
IsKeyedHasICKind(nexus->kind()));
DisallowHeapAllocation no_gc;
- Isolate* isolate = nexus->GetIsolate();
- MaybeObject feedback = nexus->GetFeedback();
+ auto pair = nexus->GetFeedbackPair();
+ MaybeObject feedback = pair.first;
bool is_named_feedback = IsPropertyNameFeedback(feedback);
HeapObject heap_object;
@@ -1399,18 +1452,16 @@ FeedbackIterator::FeedbackIterator(const FeedbackNexus* nexus)
state_ = kPolymorphic;
heap_object = feedback->GetHeapObjectAssumeStrong();
if (is_named_feedback) {
- polymorphic_feedback_ =
- handle(WeakFixedArray::cast(
- nexus->GetFeedbackExtra()->GetHeapObjectAssumeStrong()),
- isolate);
+ polymorphic_feedback_ = nexus->config()->NewHandle(
+ WeakFixedArray::cast(pair.second->GetHeapObjectAssumeStrong()));
} else {
polymorphic_feedback_ =
- handle(WeakFixedArray::cast(heap_object), isolate);
+ nexus->config()->NewHandle(WeakFixedArray::cast(heap_object));
}
AdvancePolymorphic();
} else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
state_ = kMonomorphic;
- MaybeObject handler = nexus->GetFeedbackExtra();
+ MaybeObject handler = pair.second;
map_ = Map::cast(heap_object);
handler_ = handler;
} else {
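The ExtractMapsAndHandlers() hunk above swaps the try_update_deprecated flag for a TryUpdateHandler callback. A minimal sketch of what a call site might look like after this change, assuming isolate, vector and slot are in scope and that the caller still wants the behaviour the old boolean enabled:

    std::vector<MapAndHandler> maps_and_handlers;
    FeedbackNexus nexus(vector, slot);
    // The lambda reproduces what the removed flag used to trigger: drop entries
    // whose deprecated map cannot be updated.
    nexus.ExtractMapsAndHandlers(
        &maps_and_handlers,
        [isolate](Handle<Map> map) { return Map::TryUpdate(isolate, map); });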
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index 677ec22457..6c5b9b97ab 100644
--- a/deps/v8/src/objects/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -17,7 +17,6 @@
#include "src/objects/name.h"
#include "src/objects/type-hints.h"
#include "src/zone/zone-containers.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -27,7 +26,7 @@ namespace internal {
class IsCompiledScope;
-enum class FeedbackSlotKind {
+enum class FeedbackSlotKind : uint8_t {
// This kind means that the slot points to the middle of other slot
// which occupies more than one feedback vector element.
// There must be no such slots in the system.
@@ -153,6 +152,8 @@ using MaybeObjectHandles = std::vector<MaybeObjectHandle>;
class FeedbackMetadata;
+#include "torque-generated/src/objects/feedback-vector-tq.inc"
+
// ClosureFeedbackCellArray is a FixedArray that contains feedback cells used
// when creating closures from a function. This is created once the function is
// compiled and is either held by the feedback vector (if allocated) or by the
@@ -174,6 +175,8 @@ class ClosureFeedbackCellArray : public FixedArray {
OBJECT_CONSTRUCTORS(ClosureFeedbackCellArray, FixedArray);
};
+class NexusConfig;
+
// A FeedbackVector has a fixed header with:
// - shared function info (which includes feedback metadata)
// - invocation count
@@ -185,6 +188,20 @@ class FeedbackVector
: public TorqueGeneratedFeedbackVector<FeedbackVector, HeapObject> {
public:
NEVER_READ_ONLY_SPACE
+ DEFINE_TORQUE_GENERATED_FEEDBACK_VECTOR_FLAGS()
+ STATIC_ASSERT(OptimizationMarker::kLastOptimizationMarker <
+ OptimizationMarkerBits::kMax);
+ STATIC_ASSERT(OptimizationTier::kLastOptimizationTier <
+ OptimizationTierBits::kMax);
+
+ static constexpr uint32_t kHasCompileOptimizedOrLogFirstExecutionMarker =
+ kNoneOrInOptimizationQueueMask << OptimizationMarkerBits::kShift;
+ static constexpr uint32_t kHasNoTopTierCodeOrCompileOptimizedMarkerMask =
+ kNoneOrMidTierMask << OptimizationTierBits::kShift |
+ kHasCompileOptimizedOrLogFirstExecutionMarker;
+ static constexpr uint32_t kHasOptimizedCodeOrCompileOptimizedMarkerMask =
+ OptimizationTierBits::kMask |
+ kHasCompileOptimizedOrLogFirstExecutionMarker;
inline bool is_empty() const;
@@ -193,21 +210,21 @@ class FeedbackVector
// Increment profiler ticks, saturating at the maximal value.
void SaturatingIncrementProfilerTicks();
- // Initialize the padding if necessary.
- inline void clear_padding();
-
inline void clear_invocation_count();
inline Code optimized_code() const;
- inline OptimizationMarker optimization_marker() const;
inline bool has_optimized_code() const;
inline bool has_optimization_marker() const;
+ inline OptimizationMarker optimization_marker() const;
+ inline OptimizationTier optimization_tier() const;
void ClearOptimizedCode();
void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo shared,
const char* reason);
static void SetOptimizedCode(Handle<FeedbackVector> vector,
Handle<Code> code);
void SetOptimizationMarker(OptimizationMarker marker);
+ void ClearOptimizationTier();
+ void InitializeOptimizationState();
// Clears the optimization marker in the feedback vector.
void ClearOptimizationMarker();
@@ -217,12 +234,15 @@ class FeedbackVector
// Conversion from an integer index to the underlying array to a slot.
static inline FeedbackSlot ToSlot(intptr_t index);
+
+ inline MaybeObject SynchronizedGet(FeedbackSlot slot) const;
+ inline void SynchronizedSet(FeedbackSlot slot, MaybeObject value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void SynchronizedSet(FeedbackSlot slot, Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
inline MaybeObject Get(FeedbackSlot slot) const;
- inline MaybeObject Get(const Isolate* isolate, FeedbackSlot slot) const;
- inline void Set(FeedbackSlot slot, MaybeObject value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void Set(FeedbackSlot slot, Object value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline MaybeObject Get(IsolateRoot isolate, FeedbackSlot slot) const;
// Returns the feedback cell at |index| that is used to create the
// closure.
@@ -271,8 +291,6 @@ class FeedbackVector
return GetLanguageModeFromSlotKind(GetKind(slot));
}
- V8_EXPORT_PRIVATE static void AssertNoLegacyTypes(MaybeObject object);
-
DECL_PRINTER(FeedbackVector)
void FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot); // NOLINT
@@ -283,9 +301,6 @@ class FeedbackVector
// The object that indicates an uninitialized cache.
static inline Handle<Symbol> UninitializedSentinel(Isolate* isolate);
- // The object that indicates a generic state.
- static inline Handle<Symbol> GenericSentinel(Isolate* isolate);
-
// The object that indicates a megamorphic state.
static inline Handle<Symbol> MegamorphicSentinel(Isolate* isolate);
@@ -308,26 +323,38 @@ class FeedbackVector
static void AddToVectorsForProfilingTools(Isolate* isolate,
Handle<FeedbackVector> vector);
+ // Private for initializing stores in FeedbackVector::New().
+ inline void Set(FeedbackSlot slot, MaybeObject value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void Set(FeedbackSlot slot, Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+#ifdef DEBUG
+ // Returns true if value is a non-HashTable FixedArray. We want to
+ // make sure not to store such objects in the vector.
+ inline static bool IsOfLegacyType(MaybeObject value);
+#endif // DEBUG
+
+ // NexusConfig controls setting slots in the vector.
+ friend NexusConfig;
+
// Don't expose the raw feedback slot getter/setter.
using TorqueGeneratedFeedbackVector::raw_feedback_slots;
};
class V8_EXPORT_PRIVATE FeedbackVectorSpec {
public:
- explicit FeedbackVectorSpec(Zone* zone)
- : slot_kinds_(zone), num_closure_feedback_cells_(0) {
+ explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone) {
slot_kinds_.reserve(16);
}
- int slots() const { return static_cast<int>(slot_kinds_.size()); }
- int closure_feedback_cells() const { return num_closure_feedback_cells_; }
+ int slot_count() const { return static_cast<int>(slot_kinds_.size()); }
+ int create_closure_slot_count() const { return create_closure_slot_count_; }
- int AddFeedbackCellForCreateClosure() {
- return num_closure_feedback_cells_++;
- }
+ int AddCreateClosureSlot() { return create_closure_slot_count_++; }
FeedbackSlotKind GetKind(FeedbackSlot slot) const {
- return static_cast<FeedbackSlotKind>(slot_kinds_.at(slot.ToInt()));
+ return slot_kinds_.at(slot.ToInt());
}
bool HasTypeProfileSlot() const;
@@ -428,12 +455,11 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpec {
private:
FeedbackSlot AddSlot(FeedbackSlotKind kind);
- void append(FeedbackSlotKind kind) {
- slot_kinds_.push_back(static_cast<unsigned char>(kind));
- }
+ void append(FeedbackSlotKind kind) { slot_kinds_.push_back(kind); }
- ZoneVector<unsigned char> slot_kinds_;
- unsigned int num_closure_feedback_cells_;
+ STATIC_ASSERT(sizeof(FeedbackSlotKind) == sizeof(uint8_t));
+ ZoneVector<FeedbackSlotKind> slot_kinds_;
+ int create_closure_slot_count_ = 0;
friend class SharedFeedbackSlot;
};
@@ -472,7 +498,7 @@ class FeedbackMetadata : public HeapObject {
// int32.
// TODO(mythria): Consider using 16 bits for this and slot_count so that we
// can save 4 bytes.
- DECL_INT32_ACCESSORS(closure_feedback_cell_count)
+ DECL_INT32_ACCESSORS(create_closure_slot_count)
// Get slot_count using an acquire load.
inline int32_t synchronized_slot_count() const;
@@ -505,9 +531,13 @@ class FeedbackMetadata : public HeapObject {
return OBJECT_POINTER_ALIGN(kHeaderSize + length(slot_count) * kInt32Size);
}
- static const int kSlotCountOffset = HeapObject::kHeaderSize;
- static const int kFeedbackCellCountOffset = kSlotCountOffset + kInt32Size;
- static const int kHeaderSize = kFeedbackCellCountOffset + kInt32Size;
+#define FIELDS(V) \
+ V(kSlotCountOffset, kInt32Size) \
+ V(kCreateClosureSlotCountOffset, kInt32Size) \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FIELDS)
+#undef FIELDS
class BodyDescriptor;
@@ -587,20 +617,79 @@ class FeedbackMetadataIterator {
FeedbackSlotKind slot_kind_;
};
-// A FeedbackNexus is the combination of a FeedbackVector and a slot.
-class V8_EXPORT_PRIVATE FeedbackNexus final {
+// NexusConfig adapts the FeedbackNexus to be used on the main thread
+// or a background thread. It controls the actual read and writes of
+// the underlying feedback vector, manages the creation of handles, and
+// expresses capabilities available in the very different contexts of
+// main and background thread. Here are the differences:
+//
+// Capability: MainThread BackgroundThread
+// Write to vector Allowed Not allowed
+// Handle creation Via Isolate Via LocalHeap
+// Reads of vector "Live" Cached after initial read
+// Thread safety Exclusive write, Shared read only
+// shared read
+class V8_EXPORT_PRIVATE NexusConfig {
public:
- FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : vector_handle_(vector), slot_(slot) {
- kind_ =
- (vector.is_null()) ? FeedbackSlotKind::kInvalid : vector->GetKind(slot);
+ static NexusConfig FromMainThread(Isolate* isolate) {
+ return NexusConfig(isolate);
}
- FeedbackNexus(FeedbackVector vector, FeedbackSlot slot)
- : vector_(vector), slot_(slot) {
- kind_ =
- (vector.is_null()) ? FeedbackSlotKind::kInvalid : vector.GetKind(slot);
+
+ static NexusConfig FromBackgroundThread(Isolate* isolate,
+ LocalHeap* local_heap) {
+ return NexusConfig(isolate, local_heap);
+ }
+
+ enum Mode { MainThread, BackgroundThread };
+
+ Mode mode() const {
+ return local_heap_ == nullptr ? MainThread : BackgroundThread;
}
+ Isolate* isolate() const { return isolate_; }
+
+ MaybeObjectHandle NewHandle(MaybeObject object) const;
+ template <typename T>
+ Handle<T> NewHandle(T object) const;
+
+ bool can_write() const { return mode() == MainThread; }
+
+ inline MaybeObject GetFeedback(FeedbackVector vector,
+ FeedbackSlot slot) const;
+ inline void SetFeedback(FeedbackVector vector, FeedbackSlot slot,
+ MaybeObject object,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER) const;
+
+ std::pair<MaybeObject, MaybeObject> GetFeedbackPair(FeedbackVector vector,
+ FeedbackSlot slot) const;
+ void SetFeedbackPair(FeedbackVector vector, FeedbackSlot start_slot,
+ MaybeObject feedback, WriteBarrierMode mode,
+ MaybeObject feedback_extra,
+ WriteBarrierMode mode_extra) const;
+
+ private:
+ explicit NexusConfig(Isolate* isolate)
+ : isolate_(isolate), local_heap_(nullptr) {}
+ NexusConfig(Isolate* isolate, LocalHeap* local_heap)
+ : isolate_(isolate), local_heap_(local_heap) {}
+
+ Isolate* const isolate_;
+ LocalHeap* const local_heap_;
+};
+
+// A FeedbackNexus is the combination of a FeedbackVector and a slot.
+class V8_EXPORT_PRIVATE FeedbackNexus final {
+ public:
+ // For use on the main thread. A null {vector} is accepted as well.
+ FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot);
+ FeedbackNexus(FeedbackVector vector, FeedbackSlot slot);
+
+ // For use on the main or background thread as configured by {config}.
+ // {vector} must be valid.
+ FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot,
+ const NexusConfig& config);
+
+ const NexusConfig* config() const { return &config_; }
Handle<FeedbackVector> vector_handle() const {
DCHECK(vector_.is_null());
return vector_handle_;
@@ -608,6 +697,7 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
FeedbackVector vector() const {
return vector_handle_.is_null() ? vector_ : *vector_handle_;
}
+
FeedbackSlot slot() const { return slot_; }
FeedbackSlotKind kind() const { return kind_; }
@@ -624,13 +714,14 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
// For map-based ICs (load, keyed-load, store, keyed-store).
Map GetFirstMap() const;
-
int ExtractMaps(MapHandles* maps) const;
// Used to obtain maps and the associated handlers stored in the feedback
- // vector. This should be called when we expect only a handler to be sotred in
- // the extra feedback. This is used by ICs when updting the handlers.
- int ExtractMapsAndHandlers(std::vector<MapAndHandler>* maps_and_handlers,
- bool try_update_deprecated = false) const;
+ // vector. This should be called when we expect only a handler to be stored in
+ // the extra feedback. This is used by ICs when updating the handlers.
+ using TryUpdateHandler = std::function<MaybeHandle<Map>(Handle<Map>)>;
+ int ExtractMapsAndHandlers(
+ std::vector<MapAndHandler>* maps_and_handlers,
+ TryUpdateHandler map_handler = TryUpdateHandler()) const;
MaybeObjectHandle FindHandlerForMap(Handle<Map> map) const;
// Used to obtain maps and the associated feedback stored in the feedback
// vector. The returned feedback need not be always a handler. It could be a
@@ -654,6 +745,7 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
inline MaybeObject GetFeedback() const;
inline MaybeObject GetFeedbackExtra() const;
+ inline std::pair<MaybeObject, MaybeObject> GetFeedbackPair() const;
inline Isolate* GetIsolate() const;
@@ -726,19 +818,25 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
std::vector<int> GetSourcePositions() const;
std::vector<Handle<String>> GetTypesForSourcePositions(uint32_t pos) const;
- inline void SetFeedback(Object feedback,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void SetFeedback(MaybeObject feedback,
+ private:
+ template <typename FeedbackType>
+ inline void SetFeedback(FeedbackType feedback,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void SetFeedbackExtra(Object feedback_extra,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void SetFeedbackExtra(MaybeObject feedback_extra,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ template <typename FeedbackType, typename FeedbackExtraType>
+ inline void SetFeedback(FeedbackType feedback, WriteBarrierMode mode,
+ FeedbackExtraType feedback_extra,
+ WriteBarrierMode mode_extra = UPDATE_WRITE_BARRIER);
+
+ inline MaybeObject UninitializedSentinel() const;
+ inline MaybeObject MegamorphicSentinel() const;
// Create an array. The caller must install it in a feedback vector slot.
Handle<WeakFixedArray> CreateArrayOfSize(int length);
- private:
+ // Helpers to maintain feedback_cache_.
+ inline MaybeObject FromHandle(MaybeObjectHandle slot) const;
+ inline MaybeObjectHandle ToHandle(MaybeObject value) const;
+
// The reason for having a vector handle and a raw pointer is that we can and
// should use handles during IC miss, but not during GC when we clear ICs. If
// you have a handle to the vector that is better because more operations can
@@ -747,6 +845,11 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
FeedbackVector vector_;
FeedbackSlot slot_;
FeedbackSlotKind kind_;
+ // When using the background-thread configuration, a cache is used to
+ // guarantee a consistent view of the feedback to FeedbackNexus methods.
+ mutable base::Optional<std::pair<MaybeObjectHandle, MaybeObjectHandle>>
+ feedback_cache_;
+ NexusConfig config_;
};
class V8_EXPORT_PRIVATE FeedbackIterator final {
@@ -788,7 +891,7 @@ class V8_EXPORT_PRIVATE FeedbackIterator final {
inline BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
inline CompareOperationHint CompareOperationHintFromFeedback(int type_feedback);
-inline ForInHint ForInHintFromFeedback(int type_feedback);
+inline ForInHint ForInHintFromFeedback(ForInFeedback type_feedback);
} // namespace internal
} // namespace v8
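The NexusConfig declaration above is the new seam between main-thread and background-thread use of FeedbackNexus. A small usage sketch, illustrative only, assuming isolate, local_heap, vector and slot are already in scope:

    // Main thread: live reads, writes allowed.
    FeedbackNexus main_nexus(vector, slot, NexusConfig::FromMainThread(isolate));
    DCHECK(main_nexus.config()->can_write());

    // Background thread: handles come from the LocalHeap, reads are cached
    // after the first access, and writes are not allowed.
    FeedbackNexus background_nexus(
        vector, slot, NexusConfig::FromBackgroundThread(isolate, local_heap));
    DCHECK(!background_nexus.config()->can_write());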
diff --git a/deps/v8/src/objects/feedback-vector.tq b/deps/v8/src/objects/feedback-vector.tq
index 794a159ca9..a90d4d363c 100644
--- a/deps/v8/src/objects/feedback-vector.tq
+++ b/deps/v8/src/objects/feedback-vector.tq
@@ -2,18 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+type OptimizationMarker extends uint16 constexpr 'OptimizationMarker';
+type OptimizationTier extends uint16 constexpr 'OptimizationTier';
+
+bitfield struct FeedbackVectorFlags extends uint32 {
+ optimization_marker: OptimizationMarker: 3 bit;
+ optimization_tier: OptimizationTier: 2 bit;
+}
+
@generateBodyDescriptor
@generateCppClass
extern class FeedbackVector extends HeapObject {
const length: int32;
invocation_count: int32;
profiler_ticks: int32;
- // TODO(v8:9287) The padding is not necessary on platforms with 4 bytes
- // tagged pointers, we should make it conditional; however, platform-specific
- // interacts badly with GCMole, so we need to address that first.
- padding: uint32;
+ // TODO(turboprop, v8:11010): This field could be removed by changing the
+ // tier up checks for Turboprop. If removing this field also check v8:9287.
+ // Padding was necessary for GCMole.
+ flags: FeedbackVectorFlags;
shared_function_info: SharedFunctionInfo;
- optimized_code_weak_or_smi: Weak<Code>|Smi;
+ maybe_optimized_code: Weak<Code>;
closure_feedback_cell_array: ClosureFeedbackCellArray;
raw_feedback_slots[length]: MaybeObject;
}
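FeedbackVectorFlags replaces the old padding word with a 3-bit optimization marker and a 2-bit optimization tier. A rough, hand-written sketch of the packing this implies (the real accessors are Torque-generated; the bit positions here simply follow the declaration order):

    constexpr uint32_t kMarkerShift = 0;
    constexpr uint32_t kMarkerMask  = 0x7u << kMarkerShift;  // 3 bits
    constexpr uint32_t kTierShift   = 3;
    constexpr uint32_t kTierMask    = 0x3u << kTierShift;    // 2 bits

    constexpr uint32_t EncodeFlags(uint32_t marker, uint32_t tier) {
      return ((marker << kMarkerShift) & kMarkerMask) |
             ((tier << kTierShift) & kTierMask);
    }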
diff --git a/deps/v8/src/objects/field-index-inl.h b/deps/v8/src/objects/field-index-inl.h
index ee1f875104..a3b4c23140 100644
--- a/deps/v8/src/objects/field-index-inl.h
+++ b/deps/v8/src/objects/field-index-inl.h
@@ -61,14 +61,14 @@ int FieldIndex::GetLoadByFieldIndex() const {
}
FieldIndex FieldIndex::ForDescriptor(Map map, InternalIndex descriptor_index) {
- const Isolate* isolate = GetIsolateForPtrCompr(map);
+ IsolateRoot isolate = GetIsolateForPtrCompr(map);
return ForDescriptor(isolate, map, descriptor_index);
}
-FieldIndex FieldIndex::ForDescriptor(const Isolate* isolate, Map map,
+FieldIndex FieldIndex::ForDescriptor(IsolateRoot isolate, Map map,
InternalIndex descriptor_index) {
- PropertyDetails details =
- map.instance_descriptors(isolate).GetDetails(descriptor_index);
+ PropertyDetails details = map.instance_descriptors(isolate, kRelaxedLoad)
+ .GetDetails(descriptor_index);
int field_index = details.field_index();
return ForPropertyIndex(map, field_index, details.representation());
}
diff --git a/deps/v8/src/objects/field-index.h b/deps/v8/src/objects/field-index.h
index 930f12bcda..7819c8c06b 100644
--- a/deps/v8/src/objects/field-index.h
+++ b/deps/v8/src/objects/field-index.h
@@ -31,7 +31,7 @@ class FieldIndex final {
static inline FieldIndex ForInObjectOffset(int offset, Encoding encoding);
static inline FieldIndex ForDescriptor(Map map,
InternalIndex descriptor_index);
- static inline FieldIndex ForDescriptor(const Isolate* isolate, Map map,
+ static inline FieldIndex ForDescriptor(IsolateRoot isolate, Map map,
InternalIndex descriptor_index);
inline int GetLoadByFieldIndex() const;
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index ed2d952b96..547e4dc817 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -20,7 +20,6 @@
#include "src/objects/slots.h"
#include "src/roots/roots-inl.h"
#include "src/sanitizer/tsan.h"
-#include "torque-generated/class-definitions-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -28,6 +27,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/fixed-array-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(FixedArrayBase)
FixedArrayBase::FixedArrayBase(Address ptr,
HeapObject::AllowInlineSmiStorage allow_smi)
@@ -69,11 +70,11 @@ bool FixedArray::ContainsOnlySmisOrHoles() {
}
Object FixedArray::get(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return get(isolate, index);
}
-Object FixedArray::get(const Isolate* isolate, int index) const {
+Object FixedArray::get(IsolateRoot isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
return TaggedField<Object>::Relaxed_Load(isolate, *this,
OffsetOfElementAt(index));
@@ -87,7 +88,6 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
return get(isolate, index).IsTheHole(isolate);
}
-#if !defined(_WIN32) || defined(_WIN64)
void FixedArray::set(int index, Smi value) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
@@ -95,7 +95,6 @@ void FixedArray::set(int index, Smi value) {
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
}
-#endif
void FixedArray::set(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
@@ -125,11 +124,11 @@ void FixedArray::NoWriteBarrierSet(FixedArray array, int index, Object value) {
}
Object FixedArray::synchronized_get(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return synchronized_get(isolate, index);
}
-Object FixedArray::synchronized_get(const Isolate* isolate, int index) const {
+Object FixedArray::synchronized_get(IsolateRoot isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
return ACQUIRE_READ_FIELD(*this, OffsetOfElementAt(index));
}
@@ -396,8 +395,7 @@ void FixedDoubleArray::MoveElements(Isolate* isolate, int dst_index,
int src_index, int len,
WriteBarrierMode mode) {
DCHECK_EQ(SKIP_WRITE_BARRIER, mode);
- double* data_start =
- reinterpret_cast<double*>(FIELD_ADDR(*this, kHeaderSize));
+ double* data_start = reinterpret_cast<double*>(field_address(kHeaderSize));
MemMove(data_start + dst_index, data_start + src_index, len * kDoubleSize);
}
@@ -408,11 +406,11 @@ void FixedDoubleArray::FillWithHoles(int from, int to) {
}
MaybeObject WeakFixedArray::Get(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return Get(isolate, index);
}
-MaybeObject WeakFixedArray::Get(const Isolate* isolate, int index) const {
+MaybeObject WeakFixedArray::Get(IsolateRoot isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
return objects(isolate, index);
}
@@ -443,11 +441,11 @@ void WeakFixedArray::CopyElements(Isolate* isolate, int dst_index,
}
MaybeObject WeakArrayList::Get(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return Get(isolate, index);
}
-MaybeObject WeakArrayList::Get(const Isolate* isolate, int index) const {
+MaybeObject WeakArrayList::Get(IsolateRoot isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(capacity()));
return objects(isolate, index);
}
@@ -498,7 +496,7 @@ Object ArrayList::Get(int index) const {
return FixedArray::cast(*this).get(kFirstIndex + index);
}
-Object ArrayList::Get(const Isolate* isolate, int index) const {
+Object ArrayList::Get(IsolateRoot isolate, int index) const {
return FixedArray::cast(*this).get(isolate, kFirstIndex + index);
}
@@ -531,14 +529,14 @@ void ByteArray::set(int index, byte value) {
void ByteArray::copy_in(int index, const byte* buffer, int length) {
DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
index + length <= this->length());
- Address dst_addr = FIELD_ADDR(*this, kHeaderSize + index * kCharSize);
+ Address dst_addr = field_address(kHeaderSize + index * kCharSize);
memcpy(reinterpret_cast<void*>(dst_addr), buffer, length);
}
void ByteArray::copy_out(int index, byte* buffer, int length) {
DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
index + length <= this->length());
- Address src_addr = FIELD_ADDR(*this, kHeaderSize + index * kCharSize);
+ Address src_addr = field_address(kHeaderSize + index * kCharSize);
memcpy(buffer, reinterpret_cast<void*>(src_addr), length);
}
@@ -623,7 +621,7 @@ Object TemplateList::get(int index) const {
return FixedArray::cast(*this).get(kFirstElementIndex + index);
}
-Object TemplateList::get(const Isolate* isolate, int index) const {
+Object TemplateList::get(IsolateRoot isolate, int index) const {
return FixedArray::cast(*this).get(isolate, kFirstElementIndex + index);
}
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 13148b2099..97f9297ef7 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -9,7 +9,6 @@
#include "src/objects/instance-type.h"
#include "src/objects/objects.h"
#include "src/objects/smi.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -65,6 +64,8 @@ enum FixedArraySubInstanceType {
LAST_FIXED_ARRAY_SUB_TYPE = WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE
};
+#include "torque-generated/src/objects/fixed-array-tq.inc"
+
// Common superclass for FixedArrays that allow implementations to share
// common accessors and some code paths.
class FixedArrayBase
@@ -100,7 +101,7 @@ class FixedArray
public:
// Setter and getter for elements.
inline Object get(int index) const;
- inline Object get(const Isolate* isolate, int index) const;
+ inline Object get(IsolateRoot isolate, int index) const;
static inline Handle<Object> get(FixedArray array, int index,
Isolate* isolate);
@@ -112,7 +113,7 @@ class FixedArray
// Synchronized setters and getters.
inline Object synchronized_get(int index) const;
- inline Object synchronized_get(const Isolate* isolate, int index) const;
+ inline Object synchronized_get(IsolateRoot isolate, int index) const;
// Currently only Smis are written with release semantics, hence we can avoid
// a write barrier.
inline void synchronized_set(int index, Smi value);
@@ -122,18 +123,7 @@ class FixedArray
inline bool is_the_hole(Isolate* isolate, int index);
// Setter that doesn't need write barrier.
-#if defined(_WIN32) && !defined(_WIN64)
- inline void set(int index, Smi value) {
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- DCHECK(Object(value).IsSmi());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(*this, offset, value);
- }
-#else
inline void set(int index, Smi value);
-#endif
-
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
@@ -278,7 +268,7 @@ class WeakFixedArray
: public TorqueGeneratedWeakFixedArray<WeakFixedArray, HeapObject> {
public:
inline MaybeObject Get(int index) const;
- inline MaybeObject Get(const Isolate* isolate, int index) const;
+ inline MaybeObject Get(IsolateRoot isolate, int index) const;
inline void Set(
int index, MaybeObject value,
@@ -353,7 +343,7 @@ class WeakArrayList
V8_EXPORT_PRIVATE void Compact(Isolate* isolate);
inline MaybeObject Get(int index) const;
- inline MaybeObject Get(const Isolate* isolate, int index) const;
+ inline MaybeObject Get(IsolateRoot isolate, int index) const;
// Set the element at index to obj. The underlying array must be large enough.
// If you need to grow the WeakArrayList, use the static AddToEnd() method
@@ -366,7 +356,7 @@ class WeakArrayList
}
static constexpr int CapacityForLength(int length) {
- return length + Max(length / 2, 2);
+ return length + std::max(length / 2, 2);
}
// Gives access to raw memory which stores the array's data.
@@ -416,6 +406,8 @@ class WeakArrayList
class WeakArrayList::Iterator {
public:
explicit Iterator(WeakArrayList array) : index_(0), array_(array) {}
+ Iterator(const Iterator&) = delete;
+ Iterator& operator=(const Iterator&) = delete;
inline HeapObject Next();
@@ -425,7 +417,6 @@ class WeakArrayList::Iterator {
#ifdef DEBUG
DisallowHeapAllocation no_gc_;
#endif // DEBUG
- DISALLOW_COPY_AND_ASSIGN(Iterator);
};
// Generic array grows dynamically with O(1) amortized insertion.
@@ -454,7 +445,7 @@ class ArrayList : public TorqueGeneratedArrayList<ArrayList, FixedArray> {
// storage capacity, i.e., length().
inline void SetLength(int length);
inline Object Get(int index) const;
- inline Object Get(const Isolate* isolate, int index) const;
+ inline Object Get(IsolateRoot isolate, int index) const;
inline ObjectSlot Slot(int index);
// Set the element at index to obj. The underlying array must be large enough.
@@ -600,7 +591,7 @@ class TemplateList
static Handle<TemplateList> New(Isolate* isolate, int size);
inline int length() const;
inline Object get(int index) const;
- inline Object get(const Isolate* isolate, int index) const;
+ inline Object get(IsolateRoot isolate, int index) const;
inline void set(int index, Object value);
static Handle<TemplateList> Add(Isolate* isolate, Handle<TemplateList> list,
Handle<Object> value);
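Several accessors above now take an IsolateRoot instead of const Isolate* for pointer-compression decompression. The pattern the two-argument overloads enable, sketched for an assumed FixedArray named array, is to derive the root once and reuse it across many element reads:

    IsolateRoot isolate = GetIsolateForPtrCompr(array);
    for (int i = 0; i < array.length(); ++i) {
      Object element = array.get(isolate, i);
      // ... use element ...
    }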
diff --git a/deps/v8/src/objects/foreign-inl.h b/deps/v8/src/objects/foreign-inl.h
index d455aede1a..cb3dac91eb 100644
--- a/deps/v8/src/objects/foreign-inl.h
+++ b/deps/v8/src/objects/foreign-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/foreign-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(Foreign)
// static
@@ -27,15 +29,17 @@ bool Foreign::IsNormalized(Object value) {
}
DEF_GETTER(Foreign, foreign_address, Address) {
- ExternalPointer_t encoded_value =
- ReadField<ExternalPointer_t>(kForeignAddressOffset);
- Address value = DecodeExternalPointer(isolate, encoded_value);
- return value;
+ return ReadExternalPointerField(kForeignAddressOffset, isolate,
+ kForeignForeignAddressTag);
+}
+
+void Foreign::AllocateExternalPointerEntries(Isolate* isolate) {
+ InitExternalPointerField(kForeignAddressOffset, isolate);
}
void Foreign::set_foreign_address(Isolate* isolate, Address value) {
- ExternalPointer_t encoded_value = EncodeExternalPointer(isolate, value);
- WriteField<ExternalPointer_t>(kForeignAddressOffset, encoded_value);
+ WriteExternalPointerField(kForeignAddressOffset, isolate, value,
+ kForeignForeignAddressTag);
}
} // namespace internal
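Foreign's address field now goes through the external-pointer helpers, with a separate AllocateExternalPointerEntries() step. Presumably the initialization sequence for one of the friend classes looks roughly like this (foreign, isolate and addr are hypothetical names for the sketch):

    foreign.AllocateExternalPointerEntries(isolate);  // set up the entry first
    foreign.set_foreign_address(isolate, addr);       // then write the address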
diff --git a/deps/v8/src/objects/foreign.h b/deps/v8/src/objects/foreign.h
index 2dd869d5ac..ebb219b153 100644
--- a/deps/v8/src/objects/foreign.h
+++ b/deps/v8/src/objects/foreign.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_FOREIGN_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -14,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/foreign-tq.inc"
+
// Foreign describes objects pointing from JavaScript to C structures.
class Foreign : public TorqueGeneratedForeign<Foreign, HeapObject> {
public:
@@ -43,6 +44,8 @@ class Foreign : public TorqueGeneratedForeign<Foreign, HeapObject> {
friend class StartupSerializer;
friend class WasmTypeInfo;
+ inline void AllocateExternalPointerEntries(Isolate* isolate);
+
inline void set_foreign_address(Isolate* isolate, Address value);
TQ_OBJECT_CONSTRUCTORS(Foreign)
diff --git a/deps/v8/src/objects/foreign.tq b/deps/v8/src/objects/foreign.tq
index dcf52b12e3..872da31e3b 100644
--- a/deps/v8/src/objects/foreign.tq
+++ b/deps/v8/src/objects/foreign.tq
@@ -7,3 +7,6 @@
extern class Foreign extends HeapObject {
foreign_address: ExternalPointer;
}
+
+extern operator '.foreign_address_ptr' macro LoadForeignForeignAddressPtr(
+ Foreign): RawPtr;
diff --git a/deps/v8/src/objects/free-space-inl.h b/deps/v8/src/objects/free-space-inl.h
index 0b27b3f433..e8ce1d6350 100644
--- a/deps/v8/src/objects/free-space-inl.h
+++ b/deps/v8/src/objects/free-space-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/free-space-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(FreeSpace)
RELAXED_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
diff --git a/deps/v8/src/objects/free-space.h b/deps/v8/src/objects/free-space.h
index 7bed4a1a7c..76d618cbfd 100644
--- a/deps/v8/src/objects/free-space.h
+++ b/deps/v8/src/objects/free-space.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_FREE_SPACE_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -14,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/free-space-tq.inc"
+
// FreeSpace are fixed-size free memory blocks used by the heap and GC.
// They look like heap objects (are heap object tagged and have a map) so that
// the heap remains iterable. They have a size and a next pointer.
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index b16b8a796f..4e4e9b9017 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -110,7 +110,7 @@ int HashTableBase::ComputeCapacity(int at_least_space_for) {
// Must be kept in sync with CodeStubAssembler::HashTableComputeCapacity().
int raw_cap = at_least_space_for + (at_least_space_for >> 1);
int capacity = base::bits::RoundUpToPowerOfTwo32(raw_cap);
- return Max(capacity, kMinCapacity);
+ return std::max({capacity, kMinCapacity});
}
void HashTableBase::SetNumberOfElements(int nof) {
@@ -142,8 +142,7 @@ InternalIndex HashTable<Derived, Shape>::FindEntry(LocalIsolate* isolate,
// Find entry for key otherwise return kNotFound.
template <typename Derived, typename Shape>
-template <typename LocalIsolate>
-InternalIndex HashTable<Derived, Shape>::FindEntry(const LocalIsolate* isolate,
+InternalIndex HashTable<Derived, Shape>::FindEntry(IsolateRoot isolate,
ReadOnlyRoots roots, Key key,
int32_t hash) {
uint32_t capacity = Capacity();
@@ -180,8 +179,8 @@ bool HashTable<Derived, Shape>::ToKey(ReadOnlyRoots roots, InternalIndex entry,
}
template <typename Derived, typename Shape>
-bool HashTable<Derived, Shape>::ToKey(const Isolate* isolate,
- InternalIndex entry, Object* out_k) {
+bool HashTable<Derived, Shape>::ToKey(IsolateRoot isolate, InternalIndex entry,
+ Object* out_k) {
Object k = KeyAt(isolate, entry);
if (!IsKey(GetReadOnlyRoots(isolate), k)) return false;
*out_k = Shape::Unwrap(k);
@@ -190,16 +189,14 @@ bool HashTable<Derived, Shape>::ToKey(const Isolate* isolate,
template <typename Derived, typename Shape>
Object HashTable<Derived, Shape>::KeyAt(InternalIndex entry) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return KeyAt(isolate, entry);
}
template <typename Derived, typename Shape>
-template <typename LocalIsolate>
-Object HashTable<Derived, Shape>::KeyAt(const LocalIsolate* isolate,
+Object HashTable<Derived, Shape>::KeyAt(IsolateRoot isolate,
InternalIndex entry) {
- return get(GetIsolateForPtrCompr(isolate),
- EntryToIndex(entry) + kEntryKeyIndex);
+ return get(isolate, EntryToIndex(entry) + kEntryKeyIndex);
}
template <typename Derived, typename Shape>
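The ComputeCapacity() hunk above keeps the same sizing rule and only trades the Max macro for std::max: grow the request by 50%, round up to a power of two, and never go below kMinCapacity. A quick worked trace, with kMinCapacity assumed to be 4:

    // at_least_space_for = 5
    int raw_cap  = 5 + (5 >> 1);                                // 7
    int capacity = base::bits::RoundUpToPowerOfTwo32(raw_cap);  // 8
    int result   = std::max({capacity, 4});                     // 8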
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index c9dd57a45a..39d8e326f6 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -138,26 +138,24 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
void IterateElements(ObjectVisitor* visitor);
// Find entry for key otherwise return kNotFound.
- template <typename LocalIsolate>
- inline InternalIndex FindEntry(const LocalIsolate* isolate,
- ReadOnlyRoots roots, Key key, int32_t hash);
+ inline InternalIndex FindEntry(IsolateRoot isolate, ReadOnlyRoots roots,
+ Key key, int32_t hash);
template <typename LocalIsolate>
inline InternalIndex FindEntry(LocalIsolate* isolate, Key key);
// Rehashes the table in-place.
- void Rehash(const Isolate* isolate);
+ void Rehash(IsolateRoot isolate);
// Returns whether k is a real key. The hole and undefined are not allowed as
// keys and can be used to indicate missing or deleted elements.
static inline bool IsKey(ReadOnlyRoots roots, Object k);
inline bool ToKey(ReadOnlyRoots roots, InternalIndex entry, Object* out_k);
- inline bool ToKey(const Isolate* isolate, InternalIndex entry, Object* out_k);
+ inline bool ToKey(IsolateRoot isolate, InternalIndex entry, Object* out_k);
// Returns the key at entry.
inline Object KeyAt(InternalIndex entry);
- template <typename LocalIsolate>
- inline Object KeyAt(const LocalIsolate* isolate, InternalIndex entry);
+ inline Object KeyAt(IsolateRoot isolate, InternalIndex entry);
static const int kElementsStartIndex = kPrefixStartIndex + Shape::kPrefixSize;
static const int kEntrySize = Shape::kEntrySize;
@@ -219,7 +217,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
// Find the entry at which to insert element with the given key that
// has the given hash value.
- InternalIndex FindInsertionEntry(const Isolate* isolate, ReadOnlyRoots roots,
+ InternalIndex FindInsertionEntry(IsolateRoot isolate, ReadOnlyRoots roots,
uint32_t hash);
InternalIndex FindInsertionEntry(Isolate* isolate, uint32_t hash);
@@ -233,7 +231,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
Isolate* isolate, Handle<Derived> table, int additionalCapacity = 0);
// Rehashes this hash-table into the new table.
- void Rehash(const Isolate* isolate, Derived new_table);
+ void Rehash(IsolateRoot isolate, Derived new_table);
inline void set_key(int index, Object value);
inline void set_key(int index, Object value, WriteBarrierMode mode);
@@ -324,7 +322,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) ObjectHashTableBase
// returned in case the key is not present.
Object Lookup(Handle<Object> key);
Object Lookup(Handle<Object> key, int32_t hash);
- Object Lookup(const Isolate* isolate, Handle<Object> key, int32_t hash);
+ Object Lookup(IsolateRoot isolate, Handle<Object> key, int32_t hash);
// Returns the value at entry.
Object ValueAt(InternalIndex entry);
diff --git a/deps/v8/src/objects/heap-number-inl.h b/deps/v8/src/objects/heap-number-inl.h
index 4e40fa70b8..97db52a58c 100644
--- a/deps/v8/src/objects/heap-number-inl.h
+++ b/deps/v8/src/objects/heap-number-inl.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/heap-number-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(HeapNumber)
uint64_t HeapNumber::value_as_bits() const {
diff --git a/deps/v8/src/objects/heap-number.h b/deps/v8/src/objects/heap-number.h
index 4e77a52340..311f1437be 100644
--- a/deps/v8/src/objects/heap-number.h
+++ b/deps/v8/src/objects/heap-number.h
@@ -13,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/heap-number-tq.inc"
+
// The HeapNumber class describes heap allocated numbers that cannot be
// represented in a Smi (small integer).
class HeapNumber
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index 2f16236e02..429379d9d3 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -6,10 +6,10 @@
#define V8_OBJECTS_HEAP_OBJECT_H_
#include "src/common/globals.h"
-#include "src/roots/roots.h"
-
+#include "src/objects/instance-type.h"
#include "src/objects/objects.h"
#include "src/objects/tagged-field.h"
+#include "src/roots/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -18,6 +18,7 @@ namespace v8 {
namespace internal {
class Heap;
+class PrimitiveHeapObject;
// HeapObject is the superclass for all classes describing heap allocated
// objects.
@@ -45,8 +46,8 @@ class HeapObject : public Object {
// Compare-and-swaps map word using release store, returns true if the map
// word was actually swapped.
- inline bool synchronized_compare_and_swap_map_word(MapWord old_map_word,
- MapWord new_map_word);
+ inline bool release_compare_and_swap_map_word(MapWord old_map_word,
+ MapWord new_map_word);
// Initialize the map immediately after the object is allocated.
// Do not use this outside Heap.
@@ -68,11 +69,11 @@ class HeapObject : public Object {
inline ReadOnlyRoots GetReadOnlyRoots() const;
// This version is intended to be used for the isolate values produced by
// i::GetIsolateForPtrCompr(HeapObject) function which may return nullptr.
- inline ReadOnlyRoots GetReadOnlyRoots(const Isolate* isolate) const;
+ inline ReadOnlyRoots GetReadOnlyRoots(IsolateRoot isolate) const;
#define IS_TYPE_FUNCTION_DECL(Type) \
V8_INLINE bool Is##Type() const; \
- V8_INLINE bool Is##Type(const Isolate* isolate) const;
+ V8_INLINE bool Is##Type(IsolateRoot isolate) const;
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
IS_TYPE_FUNCTION_DECL(HashTableBase)
IS_TYPE_FUNCTION_DECL(SmallOrderedHashTable)
@@ -93,7 +94,7 @@ class HeapObject : public Object {
#define DECL_STRUCT_PREDICATE(NAME, Name, name) \
V8_INLINE bool Is##Name() const; \
- V8_INLINE bool Is##Name(const Isolate* isolate) const;
+ V8_INLINE bool Is##Name(IsolateRoot isolate) const;
STRUCT_LIST(DECL_STRUCT_PREDICATE)
#undef DECL_STRUCT_PREDICATE
@@ -181,6 +182,7 @@ class HeapObject : public Object {
// Whether the object needs rehashing. That is the case if the object's
// content depends on FLAG_hash_seed. When the object is deserialized into
// a heap with a different hash seed, these objects need to adapt.
+ bool NeedsRehashing(InstanceType instance_type) const;
bool NeedsRehashing() const;
// Rehashing support is not implemented for all objects that need rehashing.
diff --git a/deps/v8/src/objects/internal-index.h b/deps/v8/src/objects/internal-index.h
index 130d4d1868..a241f3c686 100644
--- a/deps/v8/src/objects/internal-index.h
+++ b/deps/v8/src/objects/internal-index.h
@@ -45,11 +45,15 @@ class InternalIndex {
return static_cast<int>(entry_);
}
- bool operator==(const InternalIndex& other) { return entry_ == other.entry_; }
+ bool operator==(const InternalIndex& other) const {
+ return entry_ == other.entry_;
+ }
// Iteration support.
InternalIndex operator*() { return *this; }
- bool operator!=(const InternalIndex& other) { return entry_ != other.entry_; }
+ bool operator!=(const InternalIndex& other) const {
+ return entry_ != other.entry_;
+ }
InternalIndex& operator++() {
entry_++;
return *this;
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index e1cee90422..4840a83975 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -24,6 +24,7 @@
#include "src/objects/js-number-format-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/property-descriptor.h"
+#include "src/objects/smi.h"
#include "src/objects/string.h"
#include "src/strings/string-case.h"
#include "unicode/basictz.h"
@@ -1275,28 +1276,72 @@ Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
// 15. Else If mnfd is not undefined or mxfd is not undefined, then
if (!mnfd_obj->IsUndefined(isolate) || !mxfd_obj->IsUndefined(isolate)) {
- // 15. b. Let mnfd be ? DefaultNumberOption(mnfd, 0, 20, mnfdDefault).
+ Handle<String> mxfd_str = factory->maximumFractionDigits_string();
Handle<String> mnfd_str = factory->minimumFractionDigits_string();
- if (!DefaultNumberOption(isolate, mnfd_obj, 0, 20, mnfd_default, mnfd_str)
- .To(&mnfd)) {
+
+ int specified_mnfd;
+ int specified_mxfd;
+
+ // a. Let _specifiedMnfd_ be ? DefaultNumberOption(_mnfd_, 0, 20,
+ // *undefined*).
+ if (!DefaultNumberOption(isolate, mnfd_obj, 0, 20, -1, mnfd_str)
+ .To(&specified_mnfd)) {
+ return Nothing<NumberFormatDigitOptions>();
+ }
+ Handle<Object> specifiedMnfd_obj;
+ if (specified_mnfd < 0) {
+ specifiedMnfd_obj = factory->undefined_value();
+ } else {
+ specifiedMnfd_obj = handle(Smi::FromInt(specified_mnfd), isolate);
+ }
+
+ // b. Let _specifiedMxfd_ be ? DefaultNumberOption(_mxfd_, 0, 20,
+ // *undefined*).
+ if (!DefaultNumberOption(isolate, mxfd_obj, 0, 20, -1, mxfd_str)
+ .To(&specified_mxfd)) {
return Nothing<NumberFormatDigitOptions>();
}
+ Handle<Object> specifiedMxfd_obj;
+ if (specified_mxfd < 0) {
+ specifiedMxfd_obj = factory->undefined_value();
+ } else {
+ specifiedMxfd_obj = handle(Smi::FromInt(specified_mxfd), isolate);
+ }
- // 15. c. Let mxfdActualDefault be max( mnfd, mxfdDefault ).
- int mxfd_actual_default = std::max(mnfd, mxfd_default);
+ // c. If _specifiedMxfd_ is not *undefined*, set _mnfdDefault_ to
+ // min(_mnfdDefault_, _specifiedMxfd_).
+ if (specified_mxfd >= 0) {
+ mnfd_default = std::min(mnfd_default, specified_mxfd);
+ }
- // 15. d. Let mxfd be ? DefaultNumberOption(mxfd, mnfd, 20,
- // mxfdActualDefault).
- Handle<String> mxfd_str = factory->maximumFractionDigits_string();
- if (!DefaultNumberOption(isolate, mxfd_obj, mnfd, 20, mxfd_actual_default,
- mxfd_str)
+ // d. Set _mnfd_ to ! DefaultNumberOption(_specifiedMnfd_, 0, 20,
+ // _mnfdDefault_).
+ if (!DefaultNumberOption(isolate, specifiedMnfd_obj, 0, 20, mnfd_default,
+ mnfd_str)
+ .To(&mnfd)) {
+ return Nothing<NumberFormatDigitOptions>();
+ }
+
+ // e. Set _mxfd_ to ! DefaultNumberOption(_specifiedMxfd_, 0, 20,
+ // max(_mxfdDefault_, _mnfd_)).
+ if (!DefaultNumberOption(isolate, specifiedMxfd_obj, 0, 20,
+ std::max(mxfd_default, mnfd), mxfd_str)
.To(&mxfd)) {
return Nothing<NumberFormatDigitOptions>();
}
- // 15. e. Set intlObj.[[MinimumFractionDigits]] to mnfd.
+
+ // f. If _mnfd_ is greater than _mxfd_, throw a *RangeError* exception.
+ if (mnfd > mxfd) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kPropertyValueOutOfRange, mxfd_str),
+ Nothing<NumberFormatDigitOptions>());
+ }
+
+ // g. Set intlObj.[[MinimumFractionDigits]] to mnfd.
digit_options.minimum_fraction_digits = mnfd;
- // 15. f. Set intlObj.[[MaximumFractionDigits]] to mxfd.
+ // h. Set intlObj.[[MaximumFractionDigits]] to mxfd.
digit_options.maximum_fraction_digits = mxfd;
// Else If intlObj.[[Notation]] is "compact", then
} else if (notation_is_compact) {
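The rewritten step 15 above resolves the minimum and maximum fraction digits from whichever of the two options the caller supplied before clamping and range-checking them. A hedged trace of those steps with plain ints (-1 stands in for *undefined*; the 0/3 defaults are made up for the example and the [0, 20] range checks are omitted):

    #include <algorithm>

    void TraceDigitResolution() {
      int mnfd_default = 0, mxfd_default = 3;
      int specified_mnfd = -1;  // minimumFractionDigits not passed
      int specified_mxfd = 2;   // maximumFractionDigits: 2
      // c. pull the default minimum down to the specified maximum.
      if (specified_mxfd >= 0)
        mnfd_default = std::min(mnfd_default, specified_mxfd);  // 0
      // d./e. fall back to the defaults where nothing was specified.
      int mnfd = specified_mnfd >= 0 ? specified_mnfd
                                     : mnfd_default;                  // 0
      int mxfd = specified_mxfd >= 0 ? specified_mxfd
                                     : std::max(mxfd_default, mnfd);  // 2
      // f. mnfd (0) <= mxfd (2), so no RangeError is thrown.
    }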
diff --git a/deps/v8/src/objects/intl-objects.tq b/deps/v8/src/objects/intl-objects.tq
index 88714f2bee..61f85facfd 100644
--- a/deps/v8/src/objects/intl-objects.tq
+++ b/deps/v8/src/objects/intl-objects.tq
@@ -2,157 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include 'src/objects/js-break-iterator.h'
-#include 'src/objects/js-collator.h'
-#include 'src/objects/js-date-time-format.h'
-#include 'src/objects/js-display-names.h'
-#include 'src/objects/js-list-format.h'
-#include 'src/objects/js-locale.h'
-#include 'src/objects/js-number-format.h'
#include 'src/objects/js-objects.h'
-#include 'src/objects/js-plural-rules.h'
-#include 'src/objects/js-relative-time-format.h'
-#include 'src/objects/js-segment-iterator.h'
-#include 'src/objects/js-segmenter.h'
-#include 'src/objects/js-segments.h'
-
-type DateTimeStyle extends int32 constexpr 'JSDateTimeFormat::DateTimeStyle';
-type HourCycle extends int32 constexpr 'JSDateTimeFormat::HourCycle';
-bitfield struct JSDateTimeFormatFlags extends uint31 {
- hour_cycle: HourCycle: 3 bit;
- date_style: DateTimeStyle: 3 bit;
- time_style: DateTimeStyle: 3 bit;
-}
-
-@generateCppClass
-extern class JSDateTimeFormat extends JSObject {
- locale: String;
- icu_locale: Foreign; // Managed<icu::Locale>
- icu_simple_date_format: Foreign; // Managed<icu::SimpleDateFormat>
- icu_date_interval_format: Foreign; // Managed<icu::DateIntervalFormat>
- bound_format: JSFunction|Undefined;
- flags: SmiTagged<JSDateTimeFormatFlags>;
-}
-
-type JSDisplayNamesStyle extends int32 constexpr 'JSDisplayNames::Style';
-type JSDisplayNamesFallback extends int32
-constexpr 'JSDisplayNames::Fallback';
-bitfield struct JSDisplayNamesFlags extends uint31 {
- style: JSDisplayNamesStyle: 2 bit;
- fallback: JSDisplayNamesFallback: 1 bit;
-}
-
-@generateCppClass
-extern class JSDisplayNames extends JSObject {
- internal: Foreign; // Managed<DisplayNamesInternal>
- flags: SmiTagged<JSDisplayNamesFlags>;
-}
-
-type JSListFormatStyle extends int32 constexpr 'JSListFormat::Style';
-type JSListFormatType extends int32 constexpr 'JSListFormat::Type';
-bitfield struct JSListFormatFlags extends uint31 {
- style: JSListFormatStyle: 2 bit;
- Type: JSListFormatType: 2 bit; // "type" is a reserved word.
-}
-
-@generateCppClass
-extern class JSListFormat extends JSObject {
- locale: String;
- icu_formatter: Foreign; // Managed<icu::ListFormatter>
- flags: SmiTagged<JSListFormatFlags>;
-}
-
-@generateCppClass
-extern class JSNumberFormat extends JSObject {
- locale: String;
- icu_number_formatter:
- Foreign; // Managed<icu::number::LocalizedNumberFormatter>
- bound_format: JSFunction|Undefined;
-}
-
-type JSPluralRulesType extends int32 constexpr 'JSPluralRules::Type';
-bitfield struct JSPluralRulesFlags extends uint31 {
- Type: JSPluralRulesType: 1 bit; // "type" is a reserved word.
-}
-
-@generateCppClass
-extern class JSPluralRules extends JSObject {
- locale: String;
- flags: SmiTagged<JSPluralRulesFlags>;
- icu_plural_rules: Foreign; // Managed<icu::PluralRules>
- icu_number_formatter:
- Foreign; // Managed<icu::number::LocalizedNumberFormatter>
-}
-
-type JSRelativeTimeFormatNumeric extends int32
-constexpr 'JSRelativeTimeFormat::Numeric';
-bitfield struct JSRelativeTimeFormatFlags extends uint31 {
- numeric: JSRelativeTimeFormatNumeric: 1 bit;
-}
-
-@generateCppClass
-extern class JSRelativeTimeFormat extends JSObject {
- locale: String;
- numberingSystem: String;
- icu_formatter: Foreign; // Managed<icu::RelativeDateTimeFormatter>
- flags: SmiTagged<JSRelativeTimeFormatFlags>;
-}
-
-@generateCppClass
-extern class JSLocale extends JSObject {
- icu_locale: Foreign; // Managed<icu::Locale>
-}
-
-type JSSegmenterGranularity extends int32
-constexpr 'JSSegmenter::Granularity';
-bitfield struct JSSegmenterFlags extends uint31 {
- granularity: JSSegmenterGranularity: 2 bit;
-}
-
-@generateCppClass
-extern class JSSegmenter extends JSObject {
- locale: String;
- icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
- flags: SmiTagged<JSSegmenterFlags>;
-}
-
-bitfield struct JSSegmentsFlags extends uint31 {
- granularity: JSSegmenterGranularity: 2 bit;
-}
-
-@generateCppClass
-extern class JSSegments extends JSObject {
- icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
- unicode_string: Foreign; // Managed<icu::UnicodeString>
- flags: SmiTagged<JSSegmentsFlags>;
-}
-
-bitfield struct JSSegmentIteratorFlags extends uint31 {
- granularity: JSSegmenterGranularity: 2 bit;
-}
-
-@generateCppClass
-extern class JSSegmentIterator extends JSObject {
- icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
- unicode_string: Foreign; // Managed<icu::UnicodeString>
- flags: SmiTagged<JSSegmentIteratorFlags>;
-}
-
-@generateCppClass
-extern class JSV8BreakIterator extends JSObject {
- locale: String;
- break_iterator: Foreign; // Managed<icu::BreakIterator>;
- unicode_string: Foreign; // Managed<icu::UnicodeString>;
- bound_adopt_text: Undefined|JSFunction;
- bound_first: Undefined|JSFunction;
- bound_next: Undefined|JSFunction;
- bound_current: Undefined|JSFunction;
- bound_break_type: Undefined|JSFunction;
-}
-
-@generateCppClass
-extern class JSCollator extends JSObject {
- icu_collator: Foreign; // Managed<icu::Collator>
- bound_compare: Undefined|JSFunction;
- locale: String;
-}
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 1a9a89695c..9f2046382f 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -20,11 +20,17 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-array-buffer-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSArrayBuffer)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSArrayBufferView)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSTypedArray)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSDataView)
+void JSArrayBuffer::AllocateExternalPointerEntries(Isolate* isolate) {
+ InitExternalPointerField(kBackingStoreOffset, isolate);
+}
+
size_t JSArrayBuffer::byte_length() const {
return ReadField<size_t>(kByteLengthOffset);
}
@@ -34,26 +40,25 @@ void JSArrayBuffer::set_byte_length(size_t value) {
}
DEF_GETTER(JSArrayBuffer, backing_store, void*) {
- ExternalPointer_t encoded_value =
- ReadField<ExternalPointer_t>(kBackingStoreOffset);
- return reinterpret_cast<void*>(DecodeExternalPointer(isolate, encoded_value));
+ Address value = ReadExternalPointerField(kBackingStoreOffset, isolate,
+ kArrayBufferBackingStoreTag);
+ return reinterpret_cast<void*>(value);
}
void JSArrayBuffer::set_backing_store(Isolate* isolate, void* value) {
- ExternalPointer_t encoded_value =
- EncodeExternalPointer(isolate, reinterpret_cast<Address>(value));
- WriteField<ExternalPointer_t>(kBackingStoreOffset, encoded_value);
+ WriteExternalPointerField(kBackingStoreOffset, isolate,
+ reinterpret_cast<Address>(value),
+ kArrayBufferBackingStoreTag);
}
uint32_t JSArrayBuffer::GetBackingStoreRefForDeserialization() const {
- ExternalPointer_t encoded_value =
- ReadField<ExternalPointer_t>(kBackingStoreOffset);
- return static_cast<uint32_t>(encoded_value);
+ return static_cast<uint32_t>(
+ ReadField<ExternalPointer_t>(kBackingStoreOffset));
}
void JSArrayBuffer::SetBackingStoreRefForSerialization(uint32_t ref) {
- ExternalPointer_t encoded_value = ref;
- WriteField<ExternalPointer_t>(kBackingStoreOffset, encoded_value);
+ WriteField<ExternalPointer_t>(kBackingStoreOffset,
+ static_cast<ExternalPointer_t>(ref));
}
ArrayBufferExtension* JSArrayBuffer::extension() const {
@@ -160,7 +165,6 @@ BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_asmjs_memory,
BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_shared,
JSArrayBuffer::IsSharedBit)
-
size_t JSArrayBufferView::byte_offset() const {
return ReadField<size_t>(kByteOffsetOffset);
}
@@ -181,6 +185,10 @@ bool JSArrayBufferView::WasDetached() const {
return JSArrayBuffer::cast(buffer()).was_detached();
}
+void JSTypedArray::AllocateExternalPointerEntries(Isolate* isolate) {
+ InitExternalPointerField(kExternalPointerOffset, isolate);
+}
+
size_t JSTypedArray::length() const { return ReadField<size_t>(kLengthOffset); }
void JSTypedArray::set_length(size_t value) {
@@ -188,20 +196,23 @@ void JSTypedArray::set_length(size_t value) {
}
DEF_GETTER(JSTypedArray, external_pointer, Address) {
- ExternalPointer_t encoded_value =
- ReadField<ExternalPointer_t>(kExternalPointerOffset);
- return DecodeExternalPointer(isolate, encoded_value);
+ return ReadExternalPointerField(kExternalPointerOffset, isolate,
+ kTypedArrayExternalPointerTag);
+}
+
+DEF_GETTER(JSTypedArray, external_pointer_raw, ExternalPointer_t) {
+ return ReadField<ExternalPointer_t>(kExternalPointerOffset);
}
void JSTypedArray::set_external_pointer(Isolate* isolate, Address value) {
- ExternalPointer_t encoded_value = EncodeExternalPointer(isolate, value);
- WriteField<ExternalPointer_t>(kExternalPointerOffset, encoded_value);
+ WriteExternalPointerField(kExternalPointerOffset, isolate, value,
+ kTypedArrayExternalPointerTag);
}
Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
- const Isolate* isolate) {
+ IsolateRoot isolate) {
#ifdef V8_COMPRESS_POINTERS
- return GetIsolateRoot(isolate);
+ return isolate.address();
#else
return 0;
#endif
@@ -209,15 +220,14 @@ Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
uint32_t JSTypedArray::GetExternalBackingStoreRefForDeserialization() const {
DCHECK(!is_on_heap());
- ExternalPointer_t encoded_value =
- ReadField<ExternalPointer_t>(kExternalPointerOffset);
- return static_cast<uint32_t>(encoded_value);
+ return static_cast<uint32_t>(
+ ReadField<ExternalPointer_t>(kExternalPointerOffset));
}
void JSTypedArray::SetExternalBackingStoreRefForSerialization(uint32_t ref) {
DCHECK(!is_on_heap());
- ExternalPointer_t encoded_value = ref;
- WriteField<ExternalPointer_t>(kExternalPointerOffset, encoded_value);
+ WriteField<ExternalPointer_t>(kExternalPointerOffset,
+ static_cast<ExternalPointer_t>(ref));
}
void JSTypedArray::RemoveExternalPointerCompensationForSerialization(
@@ -227,9 +237,15 @@ void JSTypedArray::RemoveExternalPointerCompensationForSerialization(
// compensation by replacing external_pointer and base_pointer fields
// with one data_pointer field which can point to either external data
// backing store or into on-heap backing store.
- set_external_pointer(
- isolate,
- external_pointer() - ExternalPointerCompensationForOnHeapArray(isolate));
+ Address offset =
+ external_pointer() - ExternalPointerCompensationForOnHeapArray(isolate);
+#ifdef V8_HEAP_SANDBOX
+ // Write decompensated offset directly to the external pointer field, thus
+ // allowing the offset to be propagated through serialization-deserialization.
+ WriteField<ExternalPointer_t>(kExternalPointerOffset, offset);
+#else
+ set_external_pointer(isolate, offset);
+#endif
}
void* JSTypedArray::DataPtr() {
@@ -287,15 +303,18 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
}
DEF_GETTER(JSDataView, data_pointer, void*) {
- ExternalPointer_t encoded_value =
- ReadField<ExternalPointer_t>(kDataPointerOffset);
- return reinterpret_cast<void*>(DecodeExternalPointer(isolate, encoded_value));
+ return reinterpret_cast<void*>(ReadExternalPointerField(
+ kDataPointerOffset, isolate, kDataViewDataPointerTag));
+}
+
+void JSDataView::AllocateExternalPointerEntries(Isolate* isolate) {
+ InitExternalPointerField(kDataPointerOffset, isolate);
}
void JSDataView::set_data_pointer(Isolate* isolate, void* value) {
- WriteField<ExternalPointer_t>(
- kDataPointerOffset,
- EncodeExternalPointer(isolate, reinterpret_cast<Address>(value)));
+ WriteExternalPointerField(kDataPointerOffset, isolate,
+ reinterpret_cast<Address>(value),
+ kDataViewDataPointerTag);
}
} // namespace internal
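
Editor's note: the js-array-buffer-inl.h hunk above replaces the open-coded EncodeExternalPointer/DecodeExternalPointer pairs with the tagged ReadExternalPointerField/WriteExternalPointerField helpers plus an explicit AllocateExternalPointerEntries() step. The following is only a toy, self-contained model of the idea behind tagged external-pointer accessors, not V8's implementation; every class name and tag value in it is invented for illustration.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    using ExternalPointerTag = uint64_t;

    struct Entry {
      uint64_t address;
      ExternalPointerTag tag;
    };

    // Toy table: objects keep an index into the table instead of a raw
    // pointer, and every load checks that the stored tag matches.
    class ToyExternalPointerTable {
     public:
      uint32_t Allocate() {                       // ~AllocateExternalPointerEntries()
        entries_.push_back({0, 0});
        return static_cast<uint32_t>(entries_.size() - 1);
      }
      void Set(uint32_t index, uint64_t address, ExternalPointerTag tag) {
        entries_[index] = {address, tag};         // ~WriteExternalPointerField()
      }
      uint64_t Get(uint32_t index, ExternalPointerTag tag) const {
        assert(entries_[index].tag == tag && "tag mismatch caught");
        return entries_[index].address;           // ~ReadExternalPointerField()
      }

     private:
      std::vector<Entry> entries_;
    };

    int main() {
      const ExternalPointerTag kArrayBufferBackingStoreTag = 1;  // invented value
      ToyExternalPointerTable table;
      uint32_t slot = table.Allocate();
      table.Set(slot, 0x1234, kArrayBufferBackingStoreTag);
      return table.Get(slot, kArrayBufferBackingStoreTag) == 0x1234 ? 0 : 1;
    }
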
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index c480e77041..72dfde896e 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -44,6 +44,7 @@ void JSArrayBuffer::Setup(SharedFlag shared,
SetEmbedderField(i, Smi::zero());
}
set_extension(nullptr);
+ AllocateExternalPointerEntries(GetIsolate());
if (!backing_store) {
set_backing_store(GetIsolate(), nullptr);
set_byte_length(0);
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 543cbc1a34..6a61ce4385 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -17,6 +17,8 @@ namespace internal {
class ArrayBufferExtension;
+#include "torque-generated/src/objects/js-array-buffer-tq.inc"
+
class JSArrayBuffer
: public TorqueGeneratedJSArrayBuffer<JSArrayBuffer, JSObject> {
public:
@@ -30,6 +32,12 @@ class JSArrayBuffer
static constexpr size_t kMaxByteLength = kMaxSafeInteger;
#endif
+ // When soft sandbox is enabled, creates entries in external pointer table for
+ // all JSArrayBuffer's fields that require soft sandbox protection (backing
+ // store pointer, backing store length, etc.).
+ // When sandbox is not enabled, it's a no-op.
+ inline void AllocateExternalPointerEntries(Isolate* isolate);
+
// [byte_length]: length in bytes
DECL_PRIMITIVE_ACCESSORS(byte_length, size_t)
@@ -258,6 +266,12 @@ class JSTypedArray
V8_EXPORT_PRIVATE Handle<JSArrayBuffer> GetBuffer();
+ // When soft sandbox is enabled, creates entries in external pointer table for
+ // all JSTypedArray's fields that require soft sandbox protection (external
+ // pointer, offset, length, etc.).
+ // When sandbox is not enabled, it's a no-op.
+ inline void AllocateExternalPointerEntries(Isolate* isolate);
+
// Use with care: returns raw pointer into heap.
inline void* DataPtr();
@@ -278,7 +292,7 @@ class JSTypedArray
// as Tagged_t value and an |external_pointer| value.
// For full-pointer mode the compensation value is zero.
static inline Address ExternalPointerCompensationForOnHeapArray(
- const Isolate* isolate);
+ IsolateRoot isolate);
//
// Serializer/deserializer support.
@@ -324,6 +338,8 @@ class JSTypedArray
// [external_pointer]: TODO(v8:4153)
DECL_GETTER(external_pointer, Address)
+ DECL_GETTER(external_pointer_raw, ExternalPointer_t)
+
inline void set_external_pointer(Isolate* isolate, Address value);
TQ_OBJECT_CONSTRUCTORS(JSTypedArray)
@@ -336,6 +352,12 @@ class JSDataView
DECL_GETTER(data_pointer, void*)
inline void set_data_pointer(Isolate* isolate, void* value);
+ // When soft sandbox is enabled, creates entries in external pointer table for
+ // all JSDataView's fields that require soft sandbox protection (data pointer,
+ // offset, length, etc.).
+ // When sandbox is not enabled, it's a no-op.
+ inline void AllocateExternalPointerEntries(Isolate* isolate);
+
// Dispatched behavior.
DECL_PRINTER(JSDataView)
DECL_VERIFIER(JSDataView)
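
Editor's note: the new AllocateExternalPointerEntries() declarations above describe an ordering contract that the js-array-buffer.cc hunk enforces: Setup() reserves the external-pointer-table entries before the first set_backing_store() call, and the whole step is a no-op when the sandbox is not compiled in. A minimal sketch of that pattern follows; only the V8_HEAP_SANDBOX macro is taken from this diff, the types and allocator are invented.

    #include <cstdint>

    // Illustrative slot allocator standing in for the Isolate's table.
    inline uint32_t ClaimTableSlot() {
      static uint32_t next = 1;
      return next++;
    }

    struct ToyArrayBuffer {
      uint32_t backing_store_slot = 0;
      uint64_t backing_store = 0;

      void AllocateExternalPointerEntries() {
    #ifdef V8_HEAP_SANDBOX
        backing_store_slot = ClaimTableSlot();  // sandbox build: reserve the entry
    #endif
        // Non-sandbox build: nothing to do, the field keeps the raw pointer.
      }

      void Setup(uint64_t store) {
        AllocateExternalPointerEntries();  // reserve entries first...
        backing_store = store;             // ...then write, as Setup() now does
      }
    };
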
diff --git a/deps/v8/src/objects/js-array-buffer.tq b/deps/v8/src/objects/js-array-buffer.tq
index b7b547a1db..6dcf03bd05 100644
--- a/deps/v8/src/objects/js-array-buffer.tq
+++ b/deps/v8/src/objects/js-array-buffer.tq
@@ -49,6 +49,11 @@ extern class JSTypedArray extends JSArrayBufferView {
base_pointer: ByteArray|Smi;
}
+extern operator '.external_pointer_ptr' macro
+LoadJSTypedArrayExternalPointerPtr(JSTypedArray): RawPtr;
+extern operator '.external_pointer_ptr=' macro
+StoreJSTypedArrayExternalPointerPtr(JSTypedArray, RawPtr);
+
@generateCppClass
extern class JSDataView extends JSArrayBufferView {
data_pointer: ExternalPointer;
diff --git a/deps/v8/src/objects/js-break-iterator-inl.h b/deps/v8/src/objects/js-break-iterator-inl.h
index 729aff90af..dc85efe3fe 100644
--- a/deps/v8/src/objects/js-break-iterator-inl.h
+++ b/deps/v8/src/objects/js-break-iterator-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-break-iterator-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSV8BreakIterator)
ACCESSORS(JSV8BreakIterator, break_iterator, Managed<icu::BreakIterator>,
diff --git a/deps/v8/src/objects/js-break-iterator.h b/deps/v8/src/objects/js-break-iterator.h
index e06b7b42b0..92104084ad 100644
--- a/deps/v8/src/objects/js-break-iterator.h
+++ b/deps/v8/src/objects/js-break-iterator.h
@@ -27,6 +27,8 @@ class BreakIterator;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-break-iterator-tq.inc"
+
class JSV8BreakIterator
: public TorqueGeneratedJSV8BreakIterator<JSV8BreakIterator, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-break-iterator.tq b/deps/v8/src/objects/js-break-iterator.tq
new file mode 100644
index 0000000000..08d121520a
--- /dev/null
+++ b/deps/v8/src/objects/js-break-iterator.tq
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-break-iterator.h'
+
+@generateCppClass
+extern class JSV8BreakIterator extends JSObject {
+ locale: String;
+ break_iterator: Foreign; // Managed<icu::BreakIterator>;
+ unicode_string: Foreign; // Managed<icu::UnicodeString>;
+ bound_adopt_text: Undefined|JSFunction;
+ bound_first: Undefined|JSFunction;
+ bound_next: Undefined|JSFunction;
+ bound_current: Undefined|JSFunction;
+ bound_break_type: Undefined|JSFunction;
+}
diff --git a/deps/v8/src/objects/js-collator-inl.h b/deps/v8/src/objects/js-collator-inl.h
index 30660f2e14..81ee95326a 100644
--- a/deps/v8/src/objects/js-collator-inl.h
+++ b/deps/v8/src/objects/js-collator-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-collator-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSCollator)
ACCESSORS(JSCollator, icu_collator, Managed<icu::Collator>, kIcuCollatorOffset)
diff --git a/deps/v8/src/objects/js-collator.h b/deps/v8/src/objects/js-collator.h
index 7e3cbc35c9..eaeac21d59 100644
--- a/deps/v8/src/objects/js-collator.h
+++ b/deps/v8/src/objects/js-collator.h
@@ -29,6 +29,8 @@ class Collator;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-collator-tq.inc"
+
class JSCollator : public TorqueGeneratedJSCollator<JSCollator, JSObject> {
public:
// ecma402/#sec-initializecollator
diff --git a/deps/v8/src/objects/js-collator.tq b/deps/v8/src/objects/js-collator.tq
new file mode 100644
index 0000000000..2e1c847534
--- /dev/null
+++ b/deps/v8/src/objects/js-collator.tq
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-collator.h'
+
+@generateCppClass
+extern class JSCollator extends JSObject {
+ icu_collator: Foreign; // Managed<icu::Collator>
+ bound_compare: Undefined|JSFunction;
+ locale: String;
+}
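
Editor's note: each new per-object .tq file like the one above is compiled by Torque into the torque-generated/...-tq.inc and ...-tq-inl.inc headers that the C++ diffs now include, and the hand-written class derives from the generated TorqueGenerated<Name> base, which is where the field accessors and offsets come from. Below is only a rough hand-written analogue of that relationship; the real generated code uses offset-based tagged-field accessors, and every *Like name here is invented.

    #include <string>
    #include <utility>

    struct JSObjectLike {};

    // Approximation of the CRTP shape Torque emits: the generated base owns the
    // field accessors, the hand-written class adds behaviour on top.
    template <class Derived, class Parent>
    class TorqueGeneratedJSCollatorLike : public Parent {
     public:
      const std::string& icu_collator() const { return icu_collator_; }
      void set_icu_collator(std::string v) { icu_collator_ = std::move(v); }
      const std::string& locale() const { return locale_; }
      void set_locale(std::string v) { locale_ = std::move(v); }

     private:
      std::string icu_collator_;
      std::string locale_;
    };

    // Mirrors "class JSCollator : public TorqueGeneratedJSCollator<JSCollator,
    // JSObject>" from js-collator.h; the bound compare function and the
    // resolve-locale logic stay hand-written in the derived class.
    class JSCollatorLike
        : public TorqueGeneratedJSCollatorLike<JSCollatorLike, JSObjectLike> {};
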
diff --git a/deps/v8/src/objects/js-collection-inl.h b/deps/v8/src/objects/js-collection-inl.h
index 6bbaa9bc1f..f2471175aa 100644
--- a/deps/v8/src/objects/js-collection-inl.h
+++ b/deps/v8/src/objects/js-collection-inl.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_JS_COLLECTION_INL_H_
#define V8_OBJECTS_JS_COLLECTION_INL_H_
-#include "src/objects/js-collection.h"
-
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/heap-object-inl.h"
+#include "src/objects/js-collection-iterator-inl.h"
+#include "src/objects/js-collection.h"
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/roots/roots-inl.h"
@@ -19,6 +19,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-collection-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSCollection)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSMap)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSSet)
@@ -26,10 +28,6 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakCollection)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakMap)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakSet)
-// TODO(jkummerow): Move JSCollectionIterator to js-collection.h?
-// TODO(jkummerow): Introduce IsJSCollectionIterator() check? Or unchecked
-// version of OBJECT_CONSTRUCTORS_IMPL macro?
-TQ_OBJECT_CONSTRUCTORS_IMPL(JSCollectionIterator)
template <class Derived, class TableType>
OrderedHashTableIterator<Derived, TableType>::OrderedHashTableIterator(
Address ptr)
@@ -51,7 +49,9 @@ CAST_ACCESSOR(JSMapIterator)
Object JSMapIterator::CurrentValue() {
OrderedHashMap table = OrderedHashMap::cast(this->table());
int index = Smi::ToInt(this->index());
- Object value = table.ValueAt(index);
+ DCHECK_GE(index, 0);
+ InternalIndex entry(index);
+ Object value = table.ValueAt(entry);
DCHECK(!value.IsTheHole());
return value;
}
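
Editor's note: the JSMapIterator::CurrentValue() change above is part of the wider migration of hash-table accessors from raw int indices to the InternalIndex wrapper: the Smi is range-checked once, wrapped, and only then handed to ValueAt(). A minimal standalone sketch of such a wrapper follows; InternalIndex's real definition lives elsewhere in src/objects/ and differs from this.

    #include <cassert>
    #include <cstddef>

    // Constructed once from a range-checked int, consumed as an unsigned
    // index, never mixed with raw ints again.
    class EntryIndex {
     public:
      explicit EntryIndex(int raw) : value_(static_cast<std::size_t>(raw)) {
        assert(raw >= 0);  // mirrors the DCHECK_GE(index, 0) added above
      }
      std::size_t value() const { return value_; }

     private:
      std::size_t value_;
    };
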
diff --git a/deps/v8/src/objects/js-collection-iterator-inl.h b/deps/v8/src/objects/js-collection-iterator-inl.h
new file mode 100644
index 0000000000..d5354e76b7
--- /dev/null
+++ b/deps/v8/src/objects/js-collection-iterator-inl.h
@@ -0,0 +1,26 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_COLLECTION_ITERATOR_INL_H_
+#define V8_OBJECTS_JS_COLLECTION_ITERATOR_INL_H_
+
+#include "src/objects/js-collection-iterator.h"
+#include "src/objects/objects-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/js-collection-iterator-tq-inl.inc"
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSCollectionIterator)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_COLLECTION_ITERATOR_INL_H_
diff --git a/deps/v8/src/objects/js-collection-iterator.h b/deps/v8/src/objects/js-collection-iterator.h
index b193aa84cd..feb3da37fa 100644
--- a/deps/v8/src/objects/js-collection-iterator.h
+++ b/deps/v8/src/objects/js-collection-iterator.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-collection-iterator-tq.inc"
+
class JSCollectionIterator
: public TorqueGeneratedJSCollectionIterator<JSCollectionIterator,
JSObject> {
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
index a0350726c0..9b3e9d0541 100644
--- a/deps/v8/src/objects/js-collection.h
+++ b/deps/v8/src/objects/js-collection.h
@@ -17,6 +17,8 @@ namespace internal {
class OrderedHashSet;
class OrderedHashMap;
+#include "torque-generated/src/objects/js-collection-tq.inc"
+
class JSCollection
: public TorqueGeneratedJSCollection<JSCollection, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-date-time-format-inl.h b/deps/v8/src/objects/js-date-time-format-inl.h
index 56d44cacf9..fefe081f8f 100644
--- a/deps/v8/src/objects/js-date-time-format-inl.h
+++ b/deps/v8/src/objects/js-date-time-format-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-date-time-format-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSDateTimeFormat)
ACCESSORS(JSDateTimeFormat, icu_locale, Managed<icu::Locale>, kIcuLocaleOffset)
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
index 64c89eeaeb..52815f9e86 100644
--- a/deps/v8/src/objects/js-date-time-format.h
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -31,6 +31,8 @@ class SimpleDateFormat;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-date-time-format-tq.inc"
+
class JSDateTimeFormat
: public TorqueGeneratedJSDateTimeFormat<JSDateTimeFormat, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-date-time-format.tq b/deps/v8/src/objects/js-date-time-format.tq
new file mode 100644
index 0000000000..f45db187eb
--- /dev/null
+++ b/deps/v8/src/objects/js-date-time-format.tq
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-date-time-format.h'
+
+type DateTimeStyle extends int32 constexpr 'JSDateTimeFormat::DateTimeStyle';
+type HourCycle extends int32 constexpr 'JSDateTimeFormat::HourCycle';
+bitfield struct JSDateTimeFormatFlags extends uint31 {
+ hour_cycle: HourCycle: 3 bit;
+ date_style: DateTimeStyle: 3 bit;
+ time_style: DateTimeStyle: 3 bit;
+}
+
+@generateCppClass
+extern class JSDateTimeFormat extends JSObject {
+ locale: String;
+ icu_locale: Foreign; // Managed<icu::Locale>
+ icu_simple_date_format: Foreign; // Managed<icu::SimpleDateFormat>
+ icu_date_interval_format: Foreign; // Managed<icu::DateIntervalFormat>
+ bound_format: JSFunction|Undefined;
+ flags: SmiTagged<JSDateTimeFormatFlags>;
+}
diff --git a/deps/v8/src/objects/js-display-names-inl.h b/deps/v8/src/objects/js-display-names-inl.h
index 40bea22c97..5cc5b0b066 100644
--- a/deps/v8/src/objects/js-display-names-inl.h
+++ b/deps/v8/src/objects/js-display-names-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-display-names-tq-inl.inc"
+
ACCESSORS(JSDisplayNames, internal, Managed<DisplayNamesInternal>,
kInternalOffset)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSDisplayNames)
diff --git a/deps/v8/src/objects/js-display-names.h b/deps/v8/src/objects/js-display-names.h
index cd3ca1ea47..837184d1de 100644
--- a/deps/v8/src/objects/js-display-names.h
+++ b/deps/v8/src/objects/js-display-names.h
@@ -25,6 +25,8 @@ namespace internal {
class DisplayNamesInternal;
+#include "torque-generated/src/objects/js-display-names-tq.inc"
+
class JSDisplayNames
: public TorqueGeneratedJSDisplayNames<JSDisplayNames, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-display-names.tq b/deps/v8/src/objects/js-display-names.tq
new file mode 100644
index 0000000000..d2edf228d0
--- /dev/null
+++ b/deps/v8/src/objects/js-display-names.tq
@@ -0,0 +1,19 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-display-names.h'
+
+type JSDisplayNamesStyle extends int32 constexpr 'JSDisplayNames::Style';
+type JSDisplayNamesFallback extends int32
+constexpr 'JSDisplayNames::Fallback';
+bitfield struct JSDisplayNamesFlags extends uint31 {
+ style: JSDisplayNamesStyle: 2 bit;
+ fallback: JSDisplayNamesFallback: 1 bit;
+}
+
+@generateCppClass
+extern class JSDisplayNames extends JSObject {
+ internal: Foreign; // Managed<DisplayNamesInternal>
+ flags: SmiTagged<JSDisplayNamesFlags>;
+}
diff --git a/deps/v8/src/objects/js-function-inl.h b/deps/v8/src/objects/js-function-inl.h
index 606deb290a..c937f02311 100644
--- a/deps/v8/src/objects/js-function-inl.h
+++ b/deps/v8/src/objects/js-function-inl.h
@@ -20,6 +20,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-function-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSFunctionOrBoundFunction)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSBoundFunction)
OBJECT_CONSTRUCTORS_IMPL(JSFunction, JSFunctionOrBoundFunction)
@@ -69,7 +71,8 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
mode = ConcurrencyMode::kNotConcurrent;
}
- DCHECK(!is_compiled() || ActiveTierIsIgnition() || ActiveTierIsNCI());
+ DCHECK(!is_compiled() || ActiveTierIsIgnition() || ActiveTierIsNCI() ||
+ ActiveTierIsMidtierTurboprop());
DCHECK(!ActiveTierIsTurbofan());
DCHECK(shared().IsInterpreted());
DCHECK(shared().allows_lazy_compilation() ||
@@ -97,8 +100,8 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
}
bool JSFunction::IsInOptimizationQueue() {
- return has_feedback_vector() && feedback_vector().optimization_marker() ==
- OptimizationMarker::kInOptimizationQueue;
+ if (!has_feedback_vector()) return false;
+ return IsInOptimizationQueueMarker(feedback_vector().optimization_marker());
}
void JSFunction::CompleteInobjectSlackTrackingIfActive() {
@@ -147,20 +150,6 @@ void JSFunction::set_shared(SharedFunctionInfo value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(*this, kSharedFunctionInfoOffset, value, mode);
}
-void JSFunction::ClearOptimizedCodeSlot(const char* reason) {
- if (has_feedback_vector() && feedback_vector().has_optimized_code()) {
- if (FLAG_trace_opt) {
- CodeTracer::Scope scope(GetIsolate()->GetCodeTracer());
- PrintF(scope.file(),
- "[evicting entry from optimizing code feedback slot (%s) for ",
- reason);
- ShortPrint(scope.file());
- PrintF(scope.file(), "]\n");
- }
- feedback_vector().ClearOptimizedCode();
- }
-}
-
void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
DCHECK(has_feedback_vector());
DCHECK(ChecksOptimizationMarker());
diff --git a/deps/v8/src/objects/js-function.cc b/deps/v8/src/objects/js-function.cc
index 6e83273e8f..6bb3665963 100644
--- a/deps/v8/src/objects/js-function.cc
+++ b/deps/v8/src/objects/js-function.cc
@@ -87,8 +87,11 @@ namespace {
// otherwise returns true and sets highest_tier.
bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) {
DCHECK_EQ((kinds & ~kJSFunctionCodeKindsMask), 0);
- if ((kinds & CodeKindFlag::OPTIMIZED_FUNCTION) != 0) {
- *highest_tier = CodeKind::OPTIMIZED_FUNCTION;
+ if ((kinds & CodeKindFlag::TURBOFAN) != 0) {
+ *highest_tier = CodeKind::TURBOFAN;
+ return true;
+ } else if ((kinds & CodeKindFlag::TURBOPROP) != 0) {
+ *highest_tier = CodeKind::TURBOPROP;
return true;
} else if ((kinds & CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT) != 0) {
*highest_tier = CodeKind::NATIVE_CONTEXT_INDEPENDENT;
@@ -119,7 +122,7 @@ bool JSFunction::ActiveTierIsIgnition() const {
bool JSFunction::ActiveTierIsTurbofan() const {
CodeKind highest_tier;
if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return false;
- return highest_tier == CodeKind::OPTIMIZED_FUNCTION;
+ return highest_tier == CodeKind::TURBOFAN;
}
bool JSFunction::ActiveTierIsNCI() const {
@@ -128,10 +131,30 @@ bool JSFunction::ActiveTierIsNCI() const {
return highest_tier == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
}
+bool JSFunction::ActiveTierIsToptierTurboprop() const {
+ CodeKind highest_tier;
+ if (!FLAG_turboprop) return false;
+ if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return false;
+ return highest_tier == CodeKind::TURBOPROP && !FLAG_turboprop_as_midtier;
+}
+
+bool JSFunction::ActiveTierIsMidtierTurboprop() const {
+ CodeKind highest_tier;
+ if (!FLAG_turboprop_as_midtier) return false;
+ if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return false;
+ return highest_tier == CodeKind::TURBOPROP && FLAG_turboprop_as_midtier;
+}
+
CodeKind JSFunction::NextTier() const {
- return (FLAG_turbo_nci_as_midtier && ActiveTierIsIgnition())
- ? CodeKind::NATIVE_CONTEXT_INDEPENDENT
- : CodeKind::OPTIMIZED_FUNCTION;
+ if (V8_UNLIKELY(FLAG_turbo_nci_as_midtier && ActiveTierIsIgnition())) {
+ return CodeKind::NATIVE_CONTEXT_INDEPENDENT;
+ } else if (V8_UNLIKELY(FLAG_turboprop) && ActiveTierIsMidtierTurboprop()) {
+ return CodeKind::TURBOFAN;
+ } else if (V8_UNLIKELY(FLAG_turboprop)) {
+ DCHECK(ActiveTierIsIgnition());
+ return CodeKind::TURBOPROP;
+ }
+ return CodeKind::TURBOFAN;
}
bool JSFunction::CanDiscardCompiled() const {
@@ -144,7 +167,7 @@ bool JSFunction::CanDiscardCompiled() const {
//
// Note that when the function has not yet been compiled we also return
// false; that's fine, since nothing must be discarded in that case.
- if (code().kind() == CodeKind::OPTIMIZED_FUNCTION) return true;
+ if (CodeKindIsOptimizedJSFunction(code().kind())) return true;
CodeKinds result = GetAvailableCodeKinds();
return (result & kJSFunctionCodeKindsMask) != 0;
}
@@ -205,7 +228,7 @@ Maybe<int> JSBoundFunction::GetLength(Isolate* isolate,
isolate);
int target_length = target->length();
- int length = Max(0, target_length - nof_bound_arguments);
+ int length = std::max(0, target_length - nof_bound_arguments);
return Just(length);
}
@@ -411,8 +434,9 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
Handle<HeapObject> prototype) {
- if (map->prototype() != *prototype)
+ if (map->prototype() != *prototype) {
Map::SetPrototype(function->GetIsolate(), map, prototype);
+ }
function->set_prototype_or_initial_map(*map);
map->SetConstructor(*function);
if (FLAG_trace_maps) {
@@ -604,9 +628,9 @@ bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
// 2) the prototype chain is modified during iteration, or 3) compilation
// failure occur during prototype chain iteration.
// So we take the maximum of two values.
- int expected_nof_properties =
- Max(static_cast<int>(constructor->shared().expected_nof_properties()),
- JSFunction::CalculateExpectedNofProperties(isolate, new_target));
+ int expected_nof_properties = std::max(
+ static_cast<int>(constructor->shared().expected_nof_properties()),
+ JSFunction::CalculateExpectedNofProperties(isolate, new_target));
JSFunction::CalculateInstanceSizeHelper(
instance_type, true, embedder_fields, expected_nof_properties,
&instance_size, &in_object_properties);
@@ -894,8 +918,8 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
CHECK_LE(max_nof_fields, JSObject::kMaxInObjectProperties);
CHECK_LE(static_cast<unsigned>(requested_embedder_fields),
static_cast<unsigned>(max_nof_fields));
- *in_object_properties = Min(requested_in_object_properties,
- max_nof_fields - requested_embedder_fields);
+ *in_object_properties = std::min(requested_in_object_properties,
+ max_nof_fields - requested_embedder_fields);
*instance_size =
header_size +
((requested_embedder_fields + *in_object_properties) << kTaggedSizeLog2);
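
Editor's note: the js-function.cc hunks above split the old OPTIMIZED_FUNCTION code kind into TURBOFAN and TURBOPROP and teach NextTier() about the two Turboprop configurations (top tier vs. --turboprop-as-midtier). The sketch below restates that flag logic as a self-contained decision table for sanity-checking the branches; the flag and code-kind names follow the diff, the surrounding scaffolding is invented.

    #include <cstdio>

    enum class CodeKind {
      INTERPRETED_FUNCTION,
      NATIVE_CONTEXT_INDEPENDENT,
      TURBOPROP,
      TURBOFAN
    };

    struct Flags {
      bool turbo_nci_as_midtier = false;
      bool turboprop = false;
      bool turboprop_as_midtier = false;
    };

    // Mirrors JSFunction::NextTier() after this change: given the currently
    // active tier, which code kind is compiled next?
    CodeKind NextTier(const Flags& f, CodeKind active) {
      bool active_is_ignition = active == CodeKind::INTERPRETED_FUNCTION;
      bool active_is_midtier_turboprop =
          f.turboprop_as_midtier && active == CodeKind::TURBOPROP;
      if (f.turbo_nci_as_midtier && active_is_ignition)
        return CodeKind::NATIVE_CONTEXT_INDEPENDENT;
      if (f.turboprop && active_is_midtier_turboprop)
        return CodeKind::TURBOFAN;       // midtier Turboprop tiers up to Turbofan
      if (f.turboprop)
        return CodeKind::TURBOPROP;      // Ignition -> Turboprop (DCHECKed upstream)
      return CodeKind::TURBOFAN;         // default: Ignition -> Turbofan
    }

    int main() {
      Flags f;
      f.turboprop = true;
      f.turboprop_as_midtier = true;
      // Prints 1: midtier Turboprop code is recompiled with Turbofan next.
      std::printf("%d\n", NextTier(f, CodeKind::TURBOPROP) == CodeKind::TURBOFAN);
    }
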
diff --git a/deps/v8/src/objects/js-function.h b/deps/v8/src/objects/js-function.h
index e7f2c0caf3..9fab6bd6c7 100644
--- a/deps/v8/src/objects/js-function.h
+++ b/deps/v8/src/objects/js-function.h
@@ -7,7 +7,6 @@
#include "src/objects/code-kind.h"
#include "src/objects/js-objects.h"
-#include "torque-generated/class-definitions.h"
#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
@@ -16,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-function-tq.inc"
+
// An abstract superclass for classes representing JavaScript function values.
// It doesn't carry any functionality but allows function classes to be
// identified in the type system.
@@ -115,6 +116,8 @@ class JSFunction : public JSFunctionOrBoundFunction {
V8_EXPORT_PRIVATE bool ActiveTierIsIgnition() const;
bool ActiveTierIsTurbofan() const;
bool ActiveTierIsNCI() const;
+ bool ActiveTierIsMidtierTurboprop() const;
+ bool ActiveTierIsToptierTurboprop() const;
CodeKind NextTier() const;
@@ -141,9 +144,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
// Tells whether or not the function is on the concurrent recompilation queue.
inline bool IsInOptimizationQueue();
- // Clears the optimized code slot in the function's feedback vector.
- inline void ClearOptimizedCodeSlot(const char* reason);
-
// Sets the optimization marker in the function's feedback vector.
inline void SetOptimizationMarker(OptimizationMarker marker);
diff --git a/deps/v8/src/objects/js-function.tq b/deps/v8/src/objects/js-function.tq
new file mode 100644
index 0000000000..b2e8aa6be2
--- /dev/null
+++ b/deps/v8/src/objects/js-function.tq
@@ -0,0 +1,34 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+@abstract
+@generateCppClass
+@highestInstanceTypeWithinParentClassRange
+extern class JSFunctionOrBoundFunction extends JSObject {
+}
+
+@generateCppClass
+extern class JSBoundFunction extends JSFunctionOrBoundFunction {
+ // The wrapped function object.
+ bound_target_function: Callable;
+ // The value that is always passed as the this value when calling the wrapped
+ // function.
+ bound_this: JSAny|SourceTextModule;
+ // A list of values whose elements are used as the first arguments to any call
+ // to the wrapped function.
+ bound_arguments: FixedArray;
+}
+
+@highestInstanceTypeWithinParentClassRange
+extern class JSFunction extends JSFunctionOrBoundFunction {
+ shared_function_info: SharedFunctionInfo;
+ context: Context;
+ feedback_cell: FeedbackCell;
+ weak code: Code;
+
+ // Space for the following field may or may not be allocated.
+ @noVerifier weak prototype_or_initial_map: JSReceiver|Map;
+}
+
+type JSFunctionWithPrototypeSlot extends JSFunction;
diff --git a/deps/v8/src/objects/js-generator-inl.h b/deps/v8/src/objects/js-generator-inl.h
index 2d5e9fe03e..4e93938710 100644
--- a/deps/v8/src/objects/js-generator-inl.h
+++ b/deps/v8/src/objects/js-generator-inl.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-generator-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSGeneratorObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncFunctionObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncGeneratorObject)
diff --git a/deps/v8/src/objects/js-generator.h b/deps/v8/src/objects/js-generator.h
index bf35595fdd..99f05abcbc 100644
--- a/deps/v8/src/objects/js-generator.h
+++ b/deps/v8/src/objects/js-generator.h
@@ -17,6 +17,8 @@ namespace internal {
// Forward declarations.
class JSPromise;
+#include "torque-generated/src/objects/js-generator-tq.inc"
+
class JSGeneratorObject
: public TorqueGeneratedJSGeneratorObject<JSGeneratorObject, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-list-format-inl.h b/deps/v8/src/objects/js-list-format-inl.h
index 5cf95db4d5..e7e0384c99 100644
--- a/deps/v8/src/objects/js-list-format-inl.h
+++ b/deps/v8/src/objects/js-list-format-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-list-format-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSListFormat)
// Base list format accessors.
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index b17d38c43f..e48a387be5 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -29,46 +29,27 @@ namespace v8 {
namespace internal {
namespace {
-const char* kStandard = "standard";
-const char* kOr = "or";
-const char* kUnit = "unit";
-const char* kStandardShort = "standard-short";
-const char* kOrShort = "or-short";
-const char* kUnitShort = "unit-short";
-const char* kStandardNarrow = "standard-narrow";
-const char* kOrNarrow = "or-narrow";
-const char* kUnitNarrow = "unit-narrow";
-
-const char* GetIcuStyleString(JSListFormat::Style style,
- JSListFormat::Type type) {
+
+UListFormatterWidth GetIcuWidth(JSListFormat::Style style) {
+ switch (style) {
+ case JSListFormat::Style::LONG:
+ return ULISTFMT_WIDTH_WIDE;
+ case JSListFormat::Style::SHORT:
+ return ULISTFMT_WIDTH_SHORT;
+ case JSListFormat::Style::NARROW:
+ return ULISTFMT_WIDTH_NARROW;
+ }
+ UNREACHABLE();
+}
+
+UListFormatterType GetIcuType(JSListFormat::Type type) {
switch (type) {
case JSListFormat::Type::CONJUNCTION:
- switch (style) {
- case JSListFormat::Style::LONG:
- return kStandard;
- case JSListFormat::Style::SHORT:
- return kStandardShort;
- case JSListFormat::Style::NARROW:
- return kStandardNarrow;
- }
+ return ULISTFMT_TYPE_AND;
case JSListFormat::Type::DISJUNCTION:
- switch (style) {
- case JSListFormat::Style::LONG:
- return kOr;
- case JSListFormat::Style::SHORT:
- return kOrShort;
- case JSListFormat::Style::NARROW:
- return kOrNarrow;
- }
+ return ULISTFMT_TYPE_OR;
case JSListFormat::Type::UNIT:
- switch (style) {
- case JSListFormat::Style::LONG:
- return kUnit;
- case JSListFormat::Style::SHORT:
- return kUnitShort;
- case JSListFormat::Style::NARROW:
- return kUnitNarrow;
- }
+ return ULISTFMT_TYPE_UNITS;
}
UNREACHABLE();
}
@@ -143,7 +124,7 @@ MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
icu::Locale icu_locale = r.icu_locale;
UErrorCode status = U_ZERO_ERROR;
icu::ListFormatter* formatter = icu::ListFormatter::createInstance(
- icu_locale, GetIcuStyleString(style_enum, type_enum), status);
+ icu_locale, GetIcuType(type_enum), GetIcuWidth(style_enum), status);
if (U_FAILURE(status) || formatter == nullptr) {
delete formatter;
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
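
Editor's note: the js-list-format.cc change above drops the private "standard-short"/"or-narrow" style strings in favour of the public ULISTFMT_TYPE_*/ULISTFMT_WIDTH_* enums. The usage sketch below shows that createInstance overload outside of V8, assuming an ICU recent enough to expose it (it appears around ICU 67 and may still carry draft status) and linking against the usual icuuc/icui18n libraries.

    #include <unicode/listformatter.h>
    #include <unicode/ulistformatter.h>
    #include <unicode/unistr.h>

    #include <iostream>
    #include <memory>
    #include <string>

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      // Same overload the diff switches to: locale + list type + width enums.
      std::unique_ptr<icu::ListFormatter> formatter(
          icu::ListFormatter::createInstance(icu::Locale("en"), ULISTFMT_TYPE_OR,
                                             ULISTFMT_WIDTH_WIDE, status));
      if (U_FAILURE(status) || !formatter) return 1;

      icu::UnicodeString items[] = {"Motorcycle", "Bus", "Car"};
      icu::UnicodeString result;
      formatter->format(items, 3, result, status);
      if (U_FAILURE(status)) return 1;

      std::string utf8;
      result.toUTF8String(utf8);
      std::cout << utf8 << "\n";  // expected: "Motorcycle, Bus, or Car"
      return 0;
    }
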
diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h
index 34878b5661..123f9e459e 100644
--- a/deps/v8/src/objects/js-list-format.h
+++ b/deps/v8/src/objects/js-list-format.h
@@ -29,6 +29,8 @@ class ListFormatter;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-list-format-tq.inc"
+
class JSListFormat
: public TorqueGeneratedJSListFormat<JSListFormat, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-list-format.tq b/deps/v8/src/objects/js-list-format.tq
new file mode 100644
index 0000000000..95d80ea96d
--- /dev/null
+++ b/deps/v8/src/objects/js-list-format.tq
@@ -0,0 +1,19 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-list-format.h'
+
+type JSListFormatStyle extends int32 constexpr 'JSListFormat::Style';
+type JSListFormatType extends int32 constexpr 'JSListFormat::Type';
+bitfield struct JSListFormatFlags extends uint31 {
+ style: JSListFormatStyle: 2 bit;
+ Type: JSListFormatType: 2 bit; // "type" is a reserved word.
+}
+
+@generateCppClass
+extern class JSListFormat extends JSObject {
+ locale: String;
+ icu_formatter: Foreign; // Managed<icu::ListFormatter>
+ flags: SmiTagged<JSListFormatFlags>;
+}
diff --git a/deps/v8/src/objects/js-locale-inl.h b/deps/v8/src/objects/js-locale-inl.h
index cbd62b9a93..49c4dc7b4f 100644
--- a/deps/v8/src/objects/js-locale-inl.h
+++ b/deps/v8/src/objects/js-locale-inl.h
@@ -19,6 +19,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-locale-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSLocale)
ACCESSORS(JSLocale, icu_locale, Managed<icu::Locale>, kIcuLocaleOffset)
diff --git a/deps/v8/src/objects/js-locale.h b/deps/v8/src/objects/js-locale.h
index 62dceac85d..d864c8272f 100644
--- a/deps/v8/src/objects/js-locale.h
+++ b/deps/v8/src/objects/js-locale.h
@@ -25,6 +25,8 @@ class Locale;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-locale-tq.inc"
+
class JSLocale : public TorqueGeneratedJSLocale<JSLocale, JSObject> {
public:
// Creates locale object with properties derived from input locale string
diff --git a/deps/v8/src/objects/js-locale.tq b/deps/v8/src/objects/js-locale.tq
new file mode 100644
index 0000000000..55c80f926f
--- /dev/null
+++ b/deps/v8/src/objects/js-locale.tq
@@ -0,0 +1,10 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-locale.h'
+
+@generateCppClass
+extern class JSLocale extends JSObject {
+ icu_locale: Foreign; // Managed<icu::Locale>
+}
diff --git a/deps/v8/src/objects/js-number-format-inl.h b/deps/v8/src/objects/js-number-format-inl.h
index 035eaf57a3..cddc93afd2 100644
--- a/deps/v8/src/objects/js-number-format-inl.h
+++ b/deps/v8/src/objects/js-number-format-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-number-format-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSNumberFormat)
ACCESSORS(JSNumberFormat, icu_number_formatter,
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index 45b0eab2db..daedb2a23a 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -389,17 +389,17 @@ Handle<String> CurrencySignString(Isolate* isolate,
Handle<String> UnitDisplayString(Isolate* isolate,
const icu::UnicodeString& skeleton) {
// Ex: skeleton as
- // "measure-unit/length-meter .### rounding-mode-half-up unit-width-full-name"
+ // "unit/length-meter .### rounding-mode-half-up unit-width-full-name"
if (skeleton.indexOf("unit-width-full-name") >= 0) {
return ReadOnlyRoots(isolate).long_string_handle();
}
// Ex: skeleton as
- // "measure-unit/length-meter .### rounding-mode-half-up unit-width-narrow".
+ // "unit/length-meter .### rounding-mode-half-up unit-width-narrow".
if (skeleton.indexOf("unit-width-narrow") >= 0) {
return ReadOnlyRoots(isolate).narrow_string_handle();
}
// Ex: skeleton as
- // "measure-unit/length-foot .### rounding-mode-half-up"
+ // "unit/length-foot .### rounding-mode-half-up"
return ReadOnlyRoots(isolate).short_string_handle();
}
@@ -422,7 +422,7 @@ Notation NotationFromSkeleton(const icu::UnicodeString& skeleton) {
return Notation::COMPACT;
}
// Ex: skeleton as
- // "measure-unit/length-foot .### rounding-mode-half-up"
+ // "unit/length-foot .### rounding-mode-half-up"
return Notation::STANDARD;
}
@@ -562,14 +562,14 @@ namespace {
// Ex: percent .### rounding-mode-half-up
// Special case for "percent"
-// Ex: "measure-unit/length-kilometer per-measure-unit/duration-hour .###
-// rounding-mode-half-up" should return "kilometer-per-unit".
-// Ex: "measure-unit/duration-year .### rounding-mode-half-up" should return
+// Ex: "unit/milliliter-per-acre .### rounding-mode-half-up"
+// should return "milliliter-per-acre".
+// Ex: "unit/year .### rounding-mode-half-up" should return
// "year".
std::string UnitFromSkeleton(const icu::UnicodeString& skeleton) {
std::string str;
str = skeleton.toUTF8String<std::string>(str);
- std::string search("measure-unit/");
+ std::string search("unit/");
size_t begin = str.find(search);
if (begin == str.npos) {
// Special case for "percent".
@@ -578,64 +578,44 @@ std::string UnitFromSkeleton(const icu::UnicodeString& skeleton) {
}
return "";
}
- // Skip the type (ex: "length").
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // b
- begin = str.find("-", begin + search.size());
+ // Ex:
+ // "unit/acre .### rounding-mode-half-up"
+ // b
+ // Ex:
+ // "unit/milliliter-per-acre .### rounding-mode-half-up"
+ // b
+ begin += search.size();
if (begin == str.npos) {
return "";
}
- begin++; // Skip the '-'.
// Find the end of the subtype.
size_t end = str.find(" ", begin);
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // b e
+ // Ex:
+ // "unit/acre .### rounding-mode-half-up"
+ // b e
+ // Ex:
+ // "unit/milliliter-per-acre .### rounding-mode-half-up"
+ // b e
if (end == str.npos) {
end = str.size();
- return str.substr(begin, end - begin);
- }
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // b e
- // [result ]
- std::string result = str.substr(begin, end - begin);
- begin = end + 1;
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // [result ]eb
- std::string search_per("per-measure-unit/");
- begin = str.find(search_per, begin);
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // [result ]e b
- if (begin == str.npos) {
- return result;
- }
- // Skip the type (ex: "duration").
- begin = str.find("-", begin + search_per.size());
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // [result ]e b
- if (begin == str.npos) {
- return result;
}
- begin++; // Skip the '-'.
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // [result ]e b
- end = str.find(" ", begin);
- if (end == str.npos) {
- end = str.size();
- }
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // [result ] b e
- return result + "-per-" + str.substr(begin, end - begin);
+ return str.substr(begin, end - begin);
}
Style StyleFromSkeleton(const icu::UnicodeString& skeleton) {
if (skeleton.indexOf("currency/") >= 0) {
return Style::CURRENCY;
}
- if (skeleton.indexOf("measure-unit/") >= 0) {
- if (skeleton.indexOf("scale/100") >= 0 &&
- skeleton.indexOf("measure-unit/concentr-percent") >= 0) {
+ if (skeleton.indexOf("percent") >= 0) {
+ // percent precision-integer rounding-mode-half-up scale/100
+ if (skeleton.indexOf("scale/100") >= 0) {
return Style::PERCENT;
+ } else {
+ return Style::UNIT;
}
+ }
+ // Before ICU68: "measure-unit/", since ICU68 "unit/"
+ if (skeleton.indexOf("unit/") >= 0) {
return Style::UNIT;
}
return Style::DECIMAL;
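
Editor's note: the UnitFromSkeleton() rewrite above relies on ICU 68 emitting compound units as a single "unit/<core-unit>" token (e.g. "unit/milliliter-per-acre") instead of the old "measure-unit/<type>-<unit> per-measure-unit/<type>-<unit>" pair, so the parser shrinks to one substring extraction. The sketch below reproduces just that extraction step with plain std::string in place of icu::UnicodeString.

    #include <iostream>
    #include <string>

    // "unit/milliliter-per-acre .### rounding-mode-half-up" -> "milliliter-per-acre"
    std::string UnitFromSkeleton(const std::string& skeleton) {
      const std::string search = "unit/";
      std::size_t begin = skeleton.find(search);
      if (begin == std::string::npos) return "";   // not a unit skeleton
      begin += search.size();                      // start of the core unit
      std::size_t end = skeleton.find(' ', begin); // unit ends at the next space
      if (end == std::string::npos) end = skeleton.size();
      return skeleton.substr(begin, end - begin);
    }

    int main() {
      std::cout << UnitFromSkeleton("unit/year .### rounding-mode-half-up") << "\n";
      std::cout << UnitFromSkeleton(
                       "unit/milliliter-per-acre .### rounding-mode-half-up")
                << "\n";  // prints "year" then "milliliter-per-acre"
    }
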
diff --git a/deps/v8/src/objects/js-number-format.h b/deps/v8/src/objects/js-number-format.h
index 062f3e07a3..38710131d6 100644
--- a/deps/v8/src/objects/js-number-format.h
+++ b/deps/v8/src/objects/js-number-format.h
@@ -32,6 +32,8 @@ class LocalizedNumberFormatter;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-number-format-tq.inc"
+
class JSNumberFormat
: public TorqueGeneratedJSNumberFormat<JSNumberFormat, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-number-format.tq b/deps/v8/src/objects/js-number-format.tq
new file mode 100644
index 0000000000..b1b63016f1
--- /dev/null
+++ b/deps/v8/src/objects/js-number-format.tq
@@ -0,0 +1,13 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-number-format.h'
+
+@generateCppClass
+extern class JSNumberFormat extends JSObject {
+ locale: String;
+ icu_number_formatter:
+ Foreign; // Managed<icu::number::LocalizedNumberFormatter>
+ bound_format: JSFunction|Undefined;
+}
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 9fcd183b89..65a50d3417 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_JS_OBJECTS_INL_H_
#define V8_OBJECTS_JS_OBJECTS_INL_H_
+#include "src/common/globals.h"
#include "src/heap/heap-write-barrier.h"
#include "src/objects/elements.h"
#include "src/objects/embedder-data-slot-inl.h"
@@ -27,6 +28,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-objects-tq-inl.inc"
+
OBJECT_CONSTRUCTORS_IMPL(JSReceiver, HeapObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSCustomElementsObject)
@@ -283,6 +286,10 @@ int JSObject::GetEmbedderFieldOffset(int index) {
return GetEmbedderFieldsStartOffset() + (kEmbedderDataSlotSize * index);
}
+void JSObject::InitializeEmbedderField(Isolate* isolate, int index) {
+ EmbedderDataSlot(*this, index).AllocateExternalPointerEntry(isolate);
+}
+
Object JSObject::GetEmbedderField(int index) {
return EmbedderDataSlot(*this, index).load_tagged();
}
@@ -296,11 +303,11 @@ void JSObject::SetEmbedderField(int index, Smi value) {
}
bool JSObject::IsUnboxedDoubleField(FieldIndex index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return IsUnboxedDoubleField(isolate, index);
}
-bool JSObject::IsUnboxedDoubleField(const Isolate* isolate,
+bool JSObject::IsUnboxedDoubleField(IsolateRoot isolate,
FieldIndex index) const {
if (!FLAG_unbox_double_fields) return false;
return map(isolate).IsUnboxedDoubleField(isolate, index);
@@ -310,11 +317,11 @@ bool JSObject::IsUnboxedDoubleField(const Isolate* isolate,
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
Object JSObject::RawFastPropertyAt(FieldIndex index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return RawFastPropertyAt(isolate, index);
}
-Object JSObject::RawFastPropertyAt(const Isolate* isolate,
+Object JSObject::RawFastPropertyAt(IsolateRoot isolate,
FieldIndex index) const {
DCHECK(!IsUnboxedDoubleField(isolate, index));
if (index.is_inobject()) {
@@ -357,7 +364,7 @@ void JSObject::RawFastDoublePropertyAsBitsAtPut(FieldIndex index,
// Double unboxing is enabled only on 64-bit platforms without pointer
// compression.
DCHECK_EQ(kDoubleSize, kTaggedSize);
- Address field_addr = FIELD_ADDR(*this, index.offset());
+ Address field_addr = field_address(index.offset());
base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(field_addr),
static_cast<base::AtomicWord>(bits));
}
@@ -633,9 +640,15 @@ void JSReceiver::initialize_properties(Isolate* isolate) {
ReadOnlyRoots roots(isolate);
DCHECK(!ObjectInYoungGeneration(roots.empty_fixed_array()));
DCHECK(!ObjectInYoungGeneration(roots.empty_property_dictionary()));
+ DCHECK(!ObjectInYoungGeneration(roots.empty_ordered_property_dictionary()));
if (map(isolate).is_dictionary_map()) {
- WRITE_FIELD(*this, kPropertiesOrHashOffset,
- roots.empty_property_dictionary());
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ WRITE_FIELD(*this, kPropertiesOrHashOffset,
+ roots.empty_ordered_property_dictionary());
+ } else {
+ WRITE_FIELD(*this, kPropertiesOrHashOffset,
+ roots.empty_property_dictionary());
+ }
} else {
WRITE_FIELD(*this, kPropertiesOrHashOffset, roots.empty_fixed_array());
}
@@ -644,7 +657,8 @@ void JSReceiver::initialize_properties(Isolate* isolate) {
DEF_GETTER(JSReceiver, HasFastProperties, bool) {
DCHECK(raw_properties_or_hash(isolate).IsSmi() ||
((raw_properties_or_hash(isolate).IsGlobalDictionary(isolate) ||
- raw_properties_or_hash(isolate).IsNameDictionary(isolate)) ==
+ raw_properties_or_hash(isolate).IsNameDictionary(isolate) ||
+ raw_properties_or_hash(isolate).IsOrderedNameDictionary(isolate)) ==
map(isolate).is_dictionary_map()));
return !map(isolate).is_dictionary_map();
}
@@ -652,6 +666,8 @@ DEF_GETTER(JSReceiver, HasFastProperties, bool) {
DEF_GETTER(JSReceiver, property_dictionary, NameDictionary) {
DCHECK(!IsJSGlobalObject(isolate));
DCHECK(!HasFastProperties(isolate));
+ DCHECK(!V8_DICT_MODE_PROTOTYPES_BOOL);
+
// Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
// i::GetIsolateForPtrCompr(HeapObject).
Object prop = raw_properties_or_hash(isolate);
@@ -661,6 +677,20 @@ DEF_GETTER(JSReceiver, property_dictionary, NameDictionary) {
return NameDictionary::cast(prop);
}
+DEF_GETTER(JSReceiver, property_dictionary_ordered, OrderedNameDictionary) {
+ DCHECK(!IsJSGlobalObject(isolate));
+ DCHECK(!HasFastProperties(isolate));
+ DCHECK(V8_DICT_MODE_PROTOTYPES_BOOL);
+
+ // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
+ // i::GetIsolateForPtrCompr(HeapObject).
+ Object prop = raw_properties_or_hash(isolate);
+ if (prop.IsSmi()) {
+ return GetReadOnlyRoots(isolate).empty_ordered_property_dictionary();
+ }
+ return OrderedNameDictionary::cast(prop);
+}
+
// TODO(gsathya): Pass isolate directly to this function and access
// the heap from this.
DEF_GETTER(JSReceiver, property_array, PropertyArray) {
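
Editor's note: the js-objects-inl.h hunks above (and the js-objects.cc hunks below) thread the V8_DICT_MODE_PROTOTYPES_BOOL constant through every slow-properties path: in that mode a dictionary-mode object stores an OrderedNameDictionary reachable via property_dictionary_ordered() instead of a NameDictionary, and callers branch on the flag. The sketch below shows only the shape of that dual-backend pattern; the dictionary types are stand-ins, not V8's.

    #include <cstddef>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    // Stand-ins for the two slow-properties backends.
    using NameDictionaryLike = std::map<std::string, int>;                       // hashed
    using OrderedNameDictionaryLike = std::vector<std::pair<std::string, int>>;  // insertion-ordered

    // Constant flag in the spirit of V8_DICT_MODE_PROTOTYPES_BOOL; flipping it
    // switches which backend every slow-properties caller uses.
    constexpr bool kDictModePrototypes = false;

    struct SlowObject {
      NameDictionaryLike name_dict;
      OrderedNameDictionaryLike ordered_dict;

      std::size_t NumberOfProperties() const {
        // Same shape as the branches added in js-objects.cc: pick exactly one
        // backend based on the constant, never mix the two.
        if (kDictModePrototypes) return ordered_dict.size();
        return name_dict.size();
      }
    };
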
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 2d095d1743..f889c43499 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -5,6 +5,7 @@
#include "src/objects/js-objects.h"
#include "src/api/api-arguments-inl.h"
+#include "src/common/globals.h"
#include "src/date/date.h"
#include "src/execution/arguments.h"
#include "src/execution/frames.h"
@@ -70,8 +71,6 @@
#include "src/strings/string-stream.h"
#include "src/utils/ostreams.h"
#include "src/wasm/wasm-objects.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
@@ -221,7 +220,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
return Just(false);
}
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
bool stable = true;
@@ -233,7 +233,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
// Directly decode from the descriptor array if |from| did not change shape.
if (stable) {
DCHECK_EQ(from->map(), *map);
- DCHECK_EQ(*descriptors, map->instance_descriptors());
+ DCHECK_EQ(*descriptors, map->instance_descriptors(kRelaxedLoad));
PropertyDetails details = descriptors->GetDetails(i);
if (!details.IsEnumerable()) continue;
@@ -252,7 +252,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
stable = from->map() == *map;
- descriptors.PatchValue(map->instance_descriptors());
+ descriptors.PatchValue(map->instance_descriptors(kRelaxedLoad));
}
} else {
// If the map did change, do a slower lookup. We are still guaranteed that
@@ -278,7 +278,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
if (result.IsNothing()) return result;
if (stable) {
stable = from->map() == *map;
- descriptors.PatchValue(map->instance_descriptors());
+ descriptors.PatchValue(map->instance_descriptors(kRelaxedLoad));
}
} else {
if (excluded_properties != nullptr &&
@@ -318,15 +318,24 @@ Maybe<bool> JSReceiver::SetOrCopyDataProperties(
GetKeysConversion::kKeepNumbers),
Nothing<bool>());
- if (!from->HasFastProperties() && target->HasFastProperties()) {
+ if (!from->HasFastProperties() && target->HasFastProperties() &&
+ !target->IsJSGlobalProxy()) {
+ // JSProxy is always in slow-mode.
+ DCHECK(!target->IsJSProxy());
// Convert to slow properties if we're guaranteed to overflow the number of
// descriptors.
- int source_length =
- from->IsJSGlobalObject()
- ? JSGlobalObject::cast(*from)
- .global_dictionary()
- .NumberOfEnumerableProperties()
- : from->property_dictionary().NumberOfEnumerableProperties();
+ int source_length;
+ if (from->IsJSGlobalObject()) {
+ source_length = JSGlobalObject::cast(*from)
+ .global_dictionary()
+ .NumberOfEnumerableProperties();
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ source_length =
+ from->property_dictionary_ordered().NumberOfEnumerableProperties();
+ } else {
+ source_length =
+ from->property_dictionary().NumberOfEnumerableProperties();
+ }
if (source_length > kMaxNumberOfDescriptors) {
JSObject::NormalizeProperties(isolate, Handle<JSObject>::cast(target),
CLEAR_INOBJECT_PROPERTIES, source_length,
@@ -606,7 +615,8 @@ Object SetHashAndUpdateProperties(HeapObject properties, int hash) {
ReadOnlyRoots roots = properties.GetReadOnlyRoots();
if (properties == roots.empty_fixed_array() ||
properties == roots.empty_property_array() ||
- properties == roots.empty_property_dictionary()) {
+ properties == roots.empty_property_dictionary() ||
+ properties == roots.empty_ordered_property_dictionary()) {
return Smi::FromInt(hash);
}
@@ -621,8 +631,13 @@ Object SetHashAndUpdateProperties(HeapObject properties, int hash) {
return properties;
}
- DCHECK(properties.IsNameDictionary());
- NameDictionary::cast(properties).SetHash(hash);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ DCHECK(properties.IsOrderedNameDictionary());
+ OrderedNameDictionary::cast(properties).SetHash(hash);
+ } else {
+ DCHECK(properties.IsNameDictionary());
+ NameDictionary::cast(properties).SetHash(hash);
+ }
return properties;
}
@@ -636,8 +651,12 @@ int GetIdentityHashHelper(JSReceiver object) {
if (properties.IsPropertyArray()) {
return PropertyArray::cast(properties).Hash();
}
+ if (V8_DICT_MODE_PROTOTYPES_BOOL && properties.IsOrderedNameDictionary()) {
+ return OrderedNameDictionary::cast(properties).Hash();
+ }
if (properties.IsNameDictionary()) {
+ DCHECK(!V8_DICT_MODE_PROTOTYPES_BOOL);
return NameDictionary::cast(properties).Hash();
}
@@ -648,7 +667,8 @@ int GetIdentityHashHelper(JSReceiver object) {
#ifdef DEBUG
ReadOnlyRoots roots = object.GetReadOnlyRoots();
DCHECK(properties == roots.empty_fixed_array() ||
- properties == roots.empty_property_dictionary());
+ properties == roots.empty_property_dictionary() ||
+ properties == roots.empty_ordered_property_dictionary());
#endif
return PropertyArray::kNoHashSentinel;
@@ -734,10 +754,19 @@ void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
cell->ClearAndInvalidate(ReadOnlyRoots(isolate));
} else {
- Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ Handle<OrderedNameDictionary> dictionary(
+ object->property_dictionary_ordered(), isolate);
- dictionary = NameDictionary::DeleteEntry(isolate, dictionary, entry);
- object->SetProperties(*dictionary);
+ dictionary =
+ OrderedNameDictionary::DeleteEntry(isolate, dictionary, entry);
+ object->SetProperties(*dictionary);
+ } else {
+ Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
+
+ dictionary = NameDictionary::DeleteEntry(isolate, dictionary, entry);
+ object->SetProperties(*dictionary);
+ }
}
if (object->map().is_prototype_map()) {
// Invalidate prototype validity cell as this may invalidate transitioning
@@ -1855,7 +1884,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
if (!map->OnlyHasSimpleProperties()) return Just(false);
Handle<JSObject> object(JSObject::cast(*receiver), isolate);
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
size_t number_of_own_elements =
@@ -1883,7 +1913,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
// side-effects.
bool stable = *map == object->map();
if (stable) {
- descriptors.PatchValue(map->instance_descriptors());
+ descriptors.PatchValue(map->instance_descriptors(kRelaxedLoad));
}
for (InternalIndex index : InternalIndex::Range(number_of_own_descriptors)) {
@@ -1896,7 +1926,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
// Directly decode from the descriptor array if |from| did not change shape.
if (stable) {
DCHECK_EQ(object->map(), *map);
- DCHECK_EQ(*descriptors, map->instance_descriptors());
+ DCHECK_EQ(*descriptors, map->instance_descriptors(kRelaxedLoad));
PropertyDetails details = descriptors->GetDetails(index);
if (!details.IsEnumerable()) continue;
@@ -1917,7 +1947,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
stable = object->map() == *map;
- descriptors.PatchValue(map->instance_descriptors());
+ descriptors.PatchValue(map->instance_descriptors(kRelaxedLoad));
}
} else {
// If the map did change, do a slower lookup. We are still guaranteed that
@@ -2038,6 +2068,21 @@ bool JSReceiver::HasProxyInPrototype(Isolate* isolate) {
return false;
}
+bool JSReceiver::IsCodeLike(Isolate* isolate) const {
+ DisallowGarbageCollection no_gc;
+ Object maybe_constructor = map().GetConstructor();
+ if (!maybe_constructor.IsJSFunction()) return false;
+ if (!JSFunction::cast(maybe_constructor).shared().IsApiFunction()) {
+ return false;
+ }
+ Object instance_template = JSFunction::cast(maybe_constructor)
+ .shared()
+ .get_api_func_data()
+ .GetInstanceTemplate();
+ if (instance_template.IsUndefined(isolate)) return false;
+ return ObjectTemplateInfo::cast(instance_template).code_like();
+}
+
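
JSReceiver::IsCodeLike is the object-side half of the TC39 "Dynamic Code Brand Checks" proposal: an object counts as code-like when it was created from an API function whose instance ObjectTemplateInfo has the code_like bit set, which lets eval() accept such host objects the way it accepts strings. A stand-alone model of the idea in plain C++ (illustrative types only, not the V8 embedder API; the embedder-facing opt-in presumably lives in the api.cc changes of this same update):

#include <iostream>
#include <memory>
#include <string>

// Illustrative model only: a "template" that can brand its instances as
// code-like, and an eval() that accepts strings or branded objects.
struct ObjectTemplateModel {
  bool code_like = false;   // corresponds to ObjectTemplateInfo::code_like()
};

struct HostObjectModel {
  std::shared_ptr<const ObjectTemplateModel> instance_template;
  std::string source;       // what ToString() would produce

  bool IsCodeLike() const {  // mirrors the shape of JSReceiver::IsCodeLike
    return instance_template && instance_template->code_like;
  }
};

// eval() in this model compiles strings, and also objects whose brand check
// passes; everything else is rejected (or stringified, per embedder policy).
std::string EvalSource(const HostObjectModel& obj) {
  if (!obj.IsCodeLike()) return "<rejected: not code-like>";
  return obj.source;        // would be handed to the compiler
}

int main() {
  auto tmpl = std::make_shared<ObjectTemplateModel>();
  tmpl->code_like = true;   // the embedder opts the template in
  HostObjectModel trusted{tmpl, "1 + 1"};
  std::cout << EvalSource(trusted) << "\n";  // prints "1 + 1"
}
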
// static
MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
Handle<JSReceiver> new_target,
@@ -2057,9 +2102,11 @@ MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, initial_map,
JSFunction::GetDerivedMap(isolate, constructor, new_target), JSObject);
+ int initial_capacity = V8_DICT_MODE_PROTOTYPES_BOOL
+ ? OrderedNameDictionary::kInitialCapacity
+ : NameDictionary::kInitialCapacity;
Handle<JSObject> result = isolate->factory()->NewFastOrSlowJSObjectFromMap(
- initial_map, NameDictionary::kInitialCapacity, AllocationType::kYoung,
- site);
+ initial_map, initial_capacity, AllocationType::kYoung, site);
isolate->counters()->constructed_objects()->Increment();
isolate->counters()->constructed_objects_runtime()->Increment();
return result;
@@ -2368,21 +2415,36 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
cell->set_value(*value);
}
} else {
- Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
-
- InternalIndex entry = dictionary->FindEntry(isolate, name);
- if (entry.is_not_found()) {
- DCHECK_IMPLIES(object->map().is_prototype_map(),
- Map::IsPrototypeChainInvalidated(object->map()));
- dictionary =
- NameDictionary::Add(isolate, dictionary, name, value, details);
- object->SetProperties(*dictionary);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ Handle<OrderedNameDictionary> dictionary(
+ object->property_dictionary_ordered(), isolate);
+ InternalIndex entry = dictionary->FindEntry(isolate, *name);
+ if (entry.is_not_found()) {
+ DCHECK_IMPLIES(object->map().is_prototype_map(),
+ Map::IsPrototypeChainInvalidated(object->map()));
+ dictionary = OrderedNameDictionary::Add(isolate, dictionary, name,
+ value, details)
+ .ToHandleChecked();
+ object->SetProperties(*dictionary);
+ } else {
+ dictionary->SetEntry(entry, *name, *value, details);
+ }
} else {
- PropertyDetails original_details = dictionary->DetailsAt(entry);
- int enumeration_index = original_details.dictionary_index();
- DCHECK_GT(enumeration_index, 0);
- details = details.set_index(enumeration_index);
- dictionary->SetEntry(entry, *name, *value, details);
+ Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
+ InternalIndex entry = dictionary->FindEntry(isolate, name);
+ if (entry.is_not_found()) {
+ DCHECK_IMPLIES(object->map().is_prototype_map(),
+ Map::IsPrototypeChainInvalidated(object->map()));
+ dictionary =
+ NameDictionary::Add(isolate, dictionary, name, value, details);
+ object->SetProperties(*dictionary);
+ } else {
+ PropertyDetails original_details = dictionary->DetailsAt(entry);
+ int enumeration_index = original_details.dictionary_index();
+ DCHECK_GT(enumeration_index, 0);
+ details = details.set_index(enumeration_index);
+ dictionary->SetEntry(entry, *name, *value, details);
+ }
}
}
}
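
One asymmetry in the branch above is the calling convention: NameDictionary::Add hands back a Handle directly, while OrderedNameDictionary::Add returns a MaybeHandle, presumably because growing an ordered hash table can fail (for instance at a capacity limit), so the ordered path has to unwrap with ToHandleChecked(). A rough analogue of that Handle/MaybeHandle split using std::optional (illustrative only, not V8's handle machinery):

#include <optional>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

// Add() on the ordered table can fail (e.g. when a hypothetical maximum
// capacity would be exceeded), so it returns an optional the caller must
// unwrap, mirroring MaybeHandle + ToHandleChecked().
struct OrderedTable {
  std::vector<std::pair<std::string, int>> entries;
  static constexpr size_t kMaxCapacity = 4;  // hypothetical limit

  static std::optional<OrderedTable> Add(OrderedTable table,
                                         const std::string& key, int value) {
    if (table.entries.size() >= kMaxCapacity) return std::nullopt;
    table.entries.emplace_back(key, value);
    return table;
  }
};

// ToHandleChecked() analogue: fail loudly if the add did not succeed.
OrderedTable ToChecked(std::optional<OrderedTable> maybe) {
  if (!maybe) throw std::logic_error("Add failed");
  return *maybe;
}

int main() {
  OrderedTable t;
  t = ToChecked(OrderedTable::Add(std::move(t), "x", 1));
  return static_cast<int>(t.entries.size()) - 1;  // 0 on success
}
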
@@ -2539,8 +2601,8 @@ void JSObject::PrintInstanceMigration(FILE* file, Map original_map,
return;
}
PrintF(file, "[migrating]");
- DescriptorArray o = original_map.instance_descriptors();
- DescriptorArray n = new_map.instance_descriptors();
+ DescriptorArray o = original_map.instance_descriptors(kRelaxedLoad);
+ DescriptorArray n = new_map.instance_descriptors(kRelaxedLoad);
for (InternalIndex i : original_map.IterateOwnDescriptors()) {
Representation o_r = o.GetDetails(i).representation();
Representation n_r = n.GetDetails(i).representation();
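
Another change threaded through this patch is that map.instance_descriptors() now takes an explicit kRelaxedLoad tag, presumably because the descriptor-array pointer can be read off the main thread (for instance by the concurrent marker) while the main thread installs a new array, so the read has to be an atomic load; relaxed ordering suffices because no other memory is synchronized through it. A plain-C++ illustration of what a relaxed atomic load gives you (this is not V8's TaggedField code):

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// One thread publishes a pointer while another reads it with
// memory_order_relaxed. Relaxed guarantees the access is a single atomic
// operation (no torn values) but imposes no ordering on surrounding memory.
std::atomic<const std::vector<int>*> descriptors{nullptr};

void Publisher(const std::vector<int>* fresh) {
  descriptors.store(fresh, std::memory_order_relaxed);
}

void Reader() {
  const std::vector<int>* d = descriptors.load(std::memory_order_relaxed);
  if (d != nullptr) std::printf("saw %zu descriptors\n", d->size());
}

int main() {
  std::vector<int> fresh{1, 2, 3};
  std::thread t1(Publisher, &fresh);
  std::thread t2(Reader);
  t1.join();
  t2.join();
}

The example is race-free only because the vector is fully constructed before either thread starts; relaxed ordering by itself would not publish the pointee's contents.
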
@@ -2728,9 +2790,9 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
isolate->factory()->NewFixedArray(inobject);
Handle<DescriptorArray> old_descriptors(
- old_map->instance_descriptors(isolate), isolate);
+ old_map->instance_descriptors(isolate, kRelaxedLoad), isolate);
Handle<DescriptorArray> new_descriptors(
- new_map->instance_descriptors(isolate), isolate);
+ new_map->instance_descriptors(isolate, kRelaxedLoad), isolate);
int old_nof = old_map->NumberOfOwnDescriptors();
int new_nof = new_map->NumberOfOwnDescriptors();
@@ -2818,7 +2880,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
- int limit = Min(inobject, number_of_fields);
+ int limit = std::min(inobject, number_of_fields);
for (int i = 0; i < limit; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
Object value = inobject_props->get(isolate, i);
@@ -2883,12 +2945,23 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
property_count += expected_additional_properties;
} else {
// Make space for two more properties.
- property_count += NameDictionary::kInitialCapacity;
+ int initial_capacity = V8_DICT_MODE_PROTOTYPES_BOOL
+ ? OrderedNameDictionary::kInitialCapacity
+ : NameDictionary::kInitialCapacity;
+ property_count += initial_capacity;
}
- Handle<NameDictionary> dictionary =
- NameDictionary::New(isolate, property_count);
- Handle<DescriptorArray> descs(map->instance_descriptors(isolate), isolate);
+ Handle<NameDictionary> dictionary;
+ Handle<OrderedNameDictionary> ord_dictionary;
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ ord_dictionary =
+ isolate->factory()->NewOrderedNameDictionary(property_count);
+ } else {
+ dictionary = isolate->factory()->NewNameDictionary(property_count);
+ }
+
+ Handle<DescriptorArray> descs(
+ map->instance_descriptors(isolate, kRelaxedLoad), isolate);
for (InternalIndex i : InternalIndex::Range(real_size)) {
PropertyDetails details = descs->GetDetails(i);
Handle<Name> key(descs->GetKey(isolate, i), isolate);
@@ -2919,11 +2992,19 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
DCHECK(!value.is_null());
PropertyDetails d(details.kind(), details.attributes(),
PropertyCellType::kNoCell);
- dictionary = NameDictionary::Add(isolate, dictionary, key, value, d);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ ord_dictionary =
+ OrderedNameDictionary::Add(isolate, ord_dictionary, key, value, d)
+ .ToHandleChecked();
+ } else {
+ dictionary = NameDictionary::Add(isolate, dictionary, key, value, d);
+ }
}
- // Copy the next enumeration index from instance descriptor.
- dictionary->set_next_enumeration_index(real_size + 1);
+ if (!V8_DICT_MODE_PROTOTYPES_BOOL) {
+ // Copy the next enumeration index from instance descriptor.
+ dictionary->set_next_enumeration_index(real_size + 1);
+ }
// From here on we cannot fail and we shouldn't GC anymore.
DisallowHeapAllocation no_allocation;
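
The ordered path above can drop the set_next_enumeration_index() bookkeeping because an OrderedNameDictionary is its own enumeration order: entries are stored in insertion order, so no per-entry enumeration index has to be maintained. A tiny contrast between the two shapes (illustrative types only):

#include <cassert>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Unordered dictionary: must track an explicit enumeration index per entry
// (plus the next index to hand out) to reconstruct insertion order later.
struct UnorderedDict {
  std::unordered_map<std::string, std::pair<int, int>> entries;  // key -> {value, enum_index}
  int next_enumeration_index = 1;
  void Add(const std::string& k, int v) {
    entries[k] = {v, next_enumeration_index++};
  }
};

// Ordered dictionary: the container itself is the enumeration order.
struct OrderedDict {
  std::vector<std::pair<std::string, int>> entries;
  void Add(const std::string& k, int v) { entries.emplace_back(k, v); }
};

int main() {
  UnorderedDict u;
  u.Add("b", 2);
  u.Add("a", 1);
  assert(u.entries["b"].second == 1 && u.entries["a"].second == 2);

  OrderedDict o;
  o.Add("b", 2);
  o.Add("a", 1);
  assert(o.entries.front().first == "b");  // insertion order preserved
}
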
@@ -2951,7 +3032,11 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
// the left-over space to avoid races with the sweeper thread.
object->synchronized_set_map(*new_map);
- object->SetProperties(*dictionary);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ object->SetProperties(*ord_dictionary);
+ } else {
+ object->SetProperties(*dictionary);
+ }
// Ensure that in-object space of slow-mode object does not contain random
// garbage.
@@ -3079,7 +3164,8 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
if (!FLAG_unbox_double_fields || external > 0) {
Isolate* isolate = object->GetIsolate();
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
Handle<FixedArray> storage;
if (!FLAG_unbox_double_fields) {
storage = isolate->factory()->NewFixedArray(inobject);
@@ -3332,26 +3418,52 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
DCHECK(!object->IsJSGlobalObject());
Isolate* isolate = object->GetIsolate();
Factory* factory = isolate->factory();
- Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
+
+ Handle<NameDictionary> dictionary;
+ Handle<OrderedNameDictionary> ord_dictionary;
+ int number_of_elements;
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ ord_dictionary = handle(object->property_dictionary_ordered(), isolate);
+ number_of_elements = ord_dictionary->NumberOfElements();
+ } else {
+ dictionary = handle(object->property_dictionary(), isolate);
+ number_of_elements = dictionary->NumberOfElements();
+ }
// Make sure we preserve dictionary representation if there are too many
// descriptors.
- int number_of_elements = dictionary->NumberOfElements();
if (number_of_elements > kMaxNumberOfDescriptors) return;
- Handle<FixedArray> iteration_order =
- NameDictionary::IterationIndices(isolate, dictionary);
+ Handle<FixedArray> iteration_order;
+ int iteration_length;
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ // |iteration_order| remains an empty handle; we don't need it.
+ iteration_length = ord_dictionary->UsedCapacity();
+ } else {
+ iteration_order = NameDictionary::IterationIndices(isolate, dictionary);
+ iteration_length = dictionary->NumberOfElements();
+ }
- int instance_descriptor_length = iteration_order->length();
int number_of_fields = 0;
// Compute the length of the instance descriptor.
ReadOnlyRoots roots(isolate);
- for (int i = 0; i < instance_descriptor_length; i++) {
- InternalIndex index(Smi::ToInt(iteration_order->get(i)));
- DCHECK(dictionary->IsKey(roots, dictionary->KeyAt(isolate, index)));
+ for (int i = 0; i < iteration_length; i++) {
+ PropertyKind kind;
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ InternalIndex index(i);
+ Object key = ord_dictionary->KeyAt(index);
+ if (!OrderedNameDictionary::IsKey(roots, key)) {
+ // Ignore deleted entries.
+ continue;
+ }
+ kind = ord_dictionary->DetailsAt(index).kind();
+ } else {
+ InternalIndex index(Smi::ToInt(iteration_order->get(i)));
+ DCHECK(dictionary->IsKey(roots, dictionary->KeyAt(isolate, index)));
+ kind = dictionary->DetailsAt(index).kind();
+ }
- PropertyKind kind = dictionary->DetailsAt(index).kind();
if (kind == kData) {
number_of_fields += 1;
}
@@ -3371,7 +3483,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
NotifyMapChange(old_map, new_map, isolate);
- if (instance_descriptor_length == 0) {
+ if (number_of_elements == 0) {
DisallowHeapAllocation no_gc;
DCHECK_LE(unused_property_fields, inobject_props);
// Transform the object.
@@ -3388,7 +3500,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
// Allocate the instance descriptor.
Handle<DescriptorArray> descriptors =
- DescriptorArray::Allocate(isolate, instance_descriptor_length, 0);
+ DescriptorArray::Allocate(isolate, number_of_elements, 0);
int number_of_allocated_fields =
number_of_fields + unused_property_fields - inobject_props;
@@ -3407,9 +3519,30 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
// Fill in the instance descriptor and the fields.
int current_offset = 0;
- for (int i = 0; i < instance_descriptor_length; i++) {
- InternalIndex index(Smi::ToInt(iteration_order->get(i)));
- Name k = dictionary->NameAt(index);
+ int descriptor_index = 0;
+ for (int i = 0; i < iteration_length; i++) {
+ Name k;
+ Object value;
+ PropertyDetails details = PropertyDetails::Empty();
+
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ InternalIndex index(i);
+ Object key_obj = ord_dictionary->KeyAt(index);
+ if (!OrderedNameDictionary::IsKey(roots, key_obj)) {
+ continue;
+ }
+ k = Name::cast(key_obj);
+
+ value = ord_dictionary->ValueAt(index);
+ details = ord_dictionary->DetailsAt(index);
+ } else {
+ InternalIndex index(Smi::ToInt(iteration_order->get(i)));
+ k = dictionary->NameAt(index);
+
+ value = dictionary->ValueAt(index);
+ details = dictionary->DetailsAt(index);
+ }
+
// Dictionary keys are internalized upon insertion.
// TODO(jkummerow): Turn this into a DCHECK if it's not hit in the wild.
CHECK(k.IsUniqueName());
@@ -3420,9 +3553,6 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
new_map->set_may_have_interesting_symbols(true);
}
- Object value = dictionary->ValueAt(index);
-
- PropertyDetails details = dictionary->DetailsAt(index);
DCHECK_EQ(kField, details.location());
DCHECK_EQ(PropertyConstness::kMutable, details.constness());
@@ -3453,9 +3583,10 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
}
current_offset += details.field_width_in_words();
}
- descriptors->Set(InternalIndex(i), &d);
+ descriptors->Set(InternalIndex(descriptor_index++), &d);
}
- DCHECK(current_offset == number_of_fields);
+ DCHECK_EQ(current_offset, number_of_fields);
+ DCHECK_EQ(descriptor_index, number_of_elements);
descriptors->Sort();
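
Because the ordered dictionary may contain holes left by deleted entries, the loop above cannot reuse the iteration index i as the descriptor slot; it advances a separate descriptor_index write cursor instead, and the new DCHECK_EQ confirms that exactly number_of_elements live entries were emitted. The same skip-holes-while-compacting shape in isolation:

#include <cassert>
#include <optional>
#include <string>
#include <vector>

// Source table with holes (deleted entries), as an ordered dictionary may
// have. Live entries are copied densely into |out| with a separate cursor.
int CompactLiveEntries(const std::vector<std::optional<std::string>>& table,
                       std::vector<std::string>* out) {
  int write_index = 0;                    // plays the role of descriptor_index
  for (size_t i = 0; i < table.size(); ++i) {
    if (!table[i].has_value()) continue;  // skip a deleted entry (the hole)
    out->push_back(*table[i]);
    ++write_index;
  }
  return write_index;
}

int main() {
  std::vector<std::optional<std::string>> table{"a", std::nullopt, "b", "c"};
  std::vector<std::string> dense;
  int written = CompactLiveEntries(table, &dense);
  assert(written == 3 && dense.size() == 3);  // matches the DCHECK_EQ above
}
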
@@ -3646,7 +3777,7 @@ bool TestFastPropertiesIntegrityLevel(Map map, PropertyAttributes level) {
DCHECK(!map.IsCustomElementsReceiverMap());
DCHECK(!map.is_dictionary_map());
- DescriptorArray descriptors = map.instance_descriptors();
+ DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
for (InternalIndex i : map.IterateOwnDescriptors()) {
if (descriptors.GetKey(i).IsPrivate()) continue;
PropertyDetails details = descriptors.GetDetails(i);
@@ -3665,8 +3796,13 @@ bool TestPropertiesIntegrityLevel(JSObject object, PropertyAttributes level) {
return TestFastPropertiesIntegrityLevel(object.map(), level);
}
- return TestDictionaryPropertiesIntegrityLevel(
- object.property_dictionary(), object.GetReadOnlyRoots(), level);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ return TestDictionaryPropertiesIntegrityLevel(
+ object.property_dictionary_ordered(), object.GetReadOnlyRoots(), level);
+ } else {
+ return TestDictionaryPropertiesIntegrityLevel(
+ object.property_dictionary(), object.GetReadOnlyRoots(), level);
+ }
}
bool TestElementsIntegrityLevel(JSObject object, PropertyAttributes level) {
@@ -3964,6 +4100,11 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
JSGlobalObject::cast(*object).global_dictionary(), isolate);
JSObject::ApplyAttributesToDictionary(isolate, roots, dictionary,
attrs);
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ Handle<OrderedNameDictionary> dictionary(
+ object->property_dictionary_ordered(), isolate);
+ JSObject::ApplyAttributesToDictionary(isolate, roots, dictionary,
+ attrs);
} else {
Handle<NameDictionary> dictionary(object->property_dictionary(),
isolate);
@@ -4181,7 +4322,7 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
Object JSObject::SlowReverseLookup(Object value) {
if (HasFastProperties()) {
- DescriptorArray descs = map().instance_descriptors();
+ DescriptorArray descs = map().instance_descriptors(kRelaxedLoad);
bool value_is_number = value.IsNumber();
for (InternalIndex i : map().IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
@@ -4219,6 +4360,8 @@ Object JSObject::SlowReverseLookup(Object value) {
} else if (IsJSGlobalObject()) {
return JSGlobalObject::cast(*this).global_dictionary().SlowReverseLookup(
value);
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ return property_dictionary_ordered().SlowReverseLookup(GetIsolate(), value);
} else {
return property_dictionary().SlowReverseLookup(value);
}
@@ -4620,7 +4763,7 @@ static bool ShouldConvertToFastElements(JSObject object,
} else {
*new_capacity = dictionary.max_number_key() + 1;
}
- *new_capacity = Max(index + 1, *new_capacity);
+ *new_capacity = std::max(index + 1, *new_capacity);
uint32_t dictionary_size = static_cast<uint32_t>(dictionary.Capacity()) *
NumberDictionary::kEntrySize;
@@ -4881,6 +5024,10 @@ bool JSObject::IsDroppableApiWrapper() {
instance_type == JS_SPECIAL_API_OBJECT_TYPE;
}
+bool JSGlobalProxy::IsDetached() const {
+ return native_context().IsNull(GetIsolate());
+}
+
void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
Handle<Name> name) {
// Regardless of whether the property is there or not invalidate
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index 11e8273fcb..cc9cc0f1dc 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -10,7 +10,6 @@
#include "src/objects/internal-index.h"
#include "src/objects/objects.h"
#include "src/objects/property-array.h"
-#include "torque-generated/class-definitions.h"
#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
@@ -29,6 +28,8 @@ class JSGlobalProxy;
class NativeContext;
class IsCompiledScope;
+#include "torque-generated/src/objects/js-objects-tq.inc"
+
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
class JSReceiver : public HeapObject {
@@ -43,9 +44,14 @@ class JSReceiver : public HeapObject {
// map.
DECL_GETTER(property_array, PropertyArray)
- // Gets slow properties for non-global objects.
+ // Gets slow properties for non-global objects (if v8_dict_mode_prototypes is
+ // not set).
DECL_GETTER(property_dictionary, NameDictionary)
+ // Gets slow properties for non-global objects (if v8_dict_mode_prototypes is
+ // set).
+ DECL_GETTER(property_dictionary_ordered, OrderedNameDictionary)
+
// Sets the properties backing store and makes sure any existing hash is moved
// to the new properties store. To clear out the properties store, pass in the
// empty_fixed_array(), the hash will be maintained in this case as well.
@@ -279,6 +285,9 @@ class JSReceiver : public HeapObject {
TORQUE_GENERATED_JS_RECEIVER_FIELDS)
bool HasProxyInPrototype(Isolate* isolate);
+ // TC39 "Dynamic Code Brand Checks"
+ bool IsCodeLike(Isolate* isolate) const;
+
OBJECT_CONSTRUCTORS(JSReceiver, HeapObject);
};
@@ -567,6 +576,7 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
static inline int GetEmbedderFieldCount(Map map);
inline int GetEmbedderFieldCount() const;
inline int GetEmbedderFieldOffset(int index);
+ inline void InitializeEmbedderField(Isolate* isolate, int index);
inline Object GetEmbedderField(int index);
inline void SetEmbedderField(int index, Object value);
inline void SetEmbedderField(int index, Smi value);
@@ -620,16 +630,14 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
const char* reason);
inline bool IsUnboxedDoubleField(FieldIndex index) const;
- inline bool IsUnboxedDoubleField(const Isolate* isolate,
- FieldIndex index) const;
+ inline bool IsUnboxedDoubleField(IsolateRoot isolate, FieldIndex index) const;
// Access fast-case object properties at index.
static Handle<Object> FastPropertyAt(Handle<JSObject> object,
Representation representation,
FieldIndex index);
inline Object RawFastPropertyAt(FieldIndex index) const;
- inline Object RawFastPropertyAt(const Isolate* isolate,
- FieldIndex index) const;
+ inline Object RawFastPropertyAt(IsolateRoot isolate, FieldIndex index) const;
inline double RawFastDoublePropertyAt(FieldIndex index) const;
inline uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
@@ -724,7 +732,7 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
// If a GC was caused while constructing this object, the elements pointer
// may point to a one pointer filler map. The object won't be rooted, but
// our heap verification code could stumble across it.
- V8_EXPORT_PRIVATE bool ElementsAreSafeToExamine(const Isolate* isolate) const;
+ V8_EXPORT_PRIVATE bool ElementsAreSafeToExamine(IsolateRoot isolate) const;
#endif
Object SlowReverseLookup(Object value);
@@ -938,6 +946,7 @@ class JSGlobalProxy
: public TorqueGeneratedJSGlobalProxy<JSGlobalProxy, JSSpecialObject> {
public:
inline bool IsDetachedFrom(JSGlobalObject global) const;
+ V8_EXPORT_PRIVATE bool IsDetached() const;
static int SizeWithEmbedderFields(int embedder_field_count);
diff --git a/deps/v8/src/objects/js-objects.tq b/deps/v8/src/objects/js-objects.tq
index 1139deeb3d..8dbe1dce03 100644
--- a/deps/v8/src/objects/js-objects.tq
+++ b/deps/v8/src/objects/js-objects.tq
@@ -53,19 +53,6 @@ extern class JSCustomElementsObject extends JSObject {
extern class JSSpecialObject extends JSCustomElementsObject {
}
-@highestInstanceTypeWithinParentClassRange
-extern class JSFunction extends JSFunctionOrBoundFunction {
- shared_function_info: SharedFunctionInfo;
- context: Context;
- feedback_cell: FeedbackCell;
- weak code: Code;
-
- // Space for the following field may or may not be allocated.
- @noVerifier weak prototype_or_initial_map: JSReceiver|Map;
-}
-
-type JSFunctionWithPrototypeSlot extends JSFunction;
-
macro GetDerivedMap(implicit context: Context)(
target: JSFunction, newTarget: JSReceiver): Map {
try {
@@ -128,24 +115,6 @@ extern class JSMessageObject extends JSObject {
error_level: Smi;
}
-@abstract
-@generateCppClass
-@highestInstanceTypeWithinParentClassRange
-extern class JSFunctionOrBoundFunction extends JSObject {
-}
-
-@generateCppClass
-extern class JSBoundFunction extends JSFunctionOrBoundFunction {
- // The wrapped function object.
- bound_target_function: Callable;
- // The value that is always passed as the this value when calling the wrapped
- // function.
- bound_this: JSAny|SourceTextModule;
- // A list of values whose elements are used as the first arguments to any call
- // to the wrapped function.
- bound_arguments: FixedArray;
-}
-
@generateCppClass
extern class JSDate extends JSObject {
// If one component is NaN, all of them are, indicating a NaN time value.
diff --git a/deps/v8/src/objects/js-plural-rules-inl.h b/deps/v8/src/objects/js-plural-rules-inl.h
index 60340931fe..fb4a97e476 100644
--- a/deps/v8/src/objects/js-plural-rules-inl.h
+++ b/deps/v8/src/objects/js-plural-rules-inl.h
@@ -19,6 +19,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-plural-rules-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSPluralRules)
ACCESSORS(JSPluralRules, icu_plural_rules, Managed<icu::PluralRules>,
diff --git a/deps/v8/src/objects/js-plural-rules.h b/deps/v8/src/objects/js-plural-rules.h
index eac9d5e92a..bd0bfe65f7 100644
--- a/deps/v8/src/objects/js-plural-rules.h
+++ b/deps/v8/src/objects/js-plural-rules.h
@@ -32,6 +32,8 @@ class LocalizedNumberFormatter;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-plural-rules-tq.inc"
+
class JSPluralRules
: public TorqueGeneratedJSPluralRules<JSPluralRules, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-plural-rules.tq b/deps/v8/src/objects/js-plural-rules.tq
new file mode 100644
index 0000000000..818cff5787
--- /dev/null
+++ b/deps/v8/src/objects/js-plural-rules.tq
@@ -0,0 +1,19 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-plural-rules.h'
+
+type JSPluralRulesType extends int32 constexpr 'JSPluralRules::Type';
+bitfield struct JSPluralRulesFlags extends uint31 {
+ Type: JSPluralRulesType: 1 bit; // "type" is a reserved word.
+}
+
+@generateCppClass
+extern class JSPluralRules extends JSObject {
+ locale: String;
+ flags: SmiTagged<JSPluralRulesFlags>;
+ icu_plural_rules: Foreign; // Managed<icu::PluralRules>
+ icu_number_formatter:
+ Foreign; // Managed<icu::number::LocalizedNumberFormatter>
+}
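
The new Torque files added for the Intl objects in this patch (js-plural-rules.tq above, and the similar js-relative-time-format.tq, js-segment-iterator.tq, js-segmenter.tq and js-segments.tq below) all use the same layout idiom: small enums packed into a bitfield struct that is stored as a SmiTagged flags word. A rough plain-C++ picture of that packing (hand-rolled shifts and masks, not the Torque-generated accessors):

#include <cassert>
#include <cstdint>

// Illustrative bit packing: a 1-bit "type" enum stored inside a flags word,
// roughly what SmiTagged<JSPluralRulesFlags> boils down to.
enum class PluralRulesType : uint32_t { kCardinal = 0, kOrdinal = 1 };

constexpr uint32_t kTypeShift = 0;
constexpr uint32_t kTypeMask = 1u << kTypeShift;

constexpr uint32_t EncodeType(uint32_t flags, PluralRulesType type) {
  return (flags & ~kTypeMask) | (static_cast<uint32_t>(type) << kTypeShift);
}

constexpr PluralRulesType DecodeType(uint32_t flags) {
  return static_cast<PluralRulesType>((flags & kTypeMask) >> kTypeShift);
}

int main() {
  uint32_t flags = 0;
  flags = EncodeType(flags, PluralRulesType::kOrdinal);
  assert(DecodeType(flags) == PluralRulesType::kOrdinal);
}
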
diff --git a/deps/v8/src/objects/js-promise-inl.h b/deps/v8/src/objects/js-promise-inl.h
index 601de6612b..6f1c316c48 100644
--- a/deps/v8/src/objects/js-promise-inl.h
+++ b/deps/v8/src/objects/js-promise-inl.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-promise-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSPromise)
BOOL_ACCESSORS(JSPromise, flags, has_handler, HasHandlerBit::kShift)
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
index 2028bc3f8b..8ef663bb39 100644
--- a/deps/v8/src/objects/js-promise.h
+++ b/deps/v8/src/objects/js-promise.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-promise-tq.inc"
+
// Representation of promise objects in the specification. Our layout of
// JSPromise differs a bit from the layout in the specification, for example
// there's only a single list of PromiseReaction objects, instead of separate
diff --git a/deps/v8/src/objects/js-proxy-inl.h b/deps/v8/src/objects/js-proxy-inl.h
index 0683cfeec8..9abe4c08d1 100644
--- a/deps/v8/src/objects/js-proxy-inl.h
+++ b/deps/v8/src/objects/js-proxy-inl.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-proxy-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSProxy)
bool JSProxy::IsRevoked() const { return !handler().IsJSReceiver(); }
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
index 1161f71486..28da615da5 100644
--- a/deps/v8/src/objects/js-proxy.h
+++ b/deps/v8/src/objects/js-proxy.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-proxy-tq.inc"
+
// The JSProxy describes EcmaScript Harmony proxies
class JSProxy : public TorqueGeneratedJSProxy<JSProxy, JSReceiver> {
public:
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index 48fe911ff5..8b99aa7c4c 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-regexp-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExp)
OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(JSRegExpResult, JSArray)
OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(JSRegExpResultIndices, JSArray)
diff --git a/deps/v8/src/objects/js-regexp-string-iterator-inl.h b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
index b0d8e4c5ec..acd724de5c 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator-inl.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-regexp-string-iterator-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpStringIterator)
BOOL_ACCESSORS(JSRegExpStringIterator, flags, done, DoneBit::kShift)
diff --git a/deps/v8/src/objects/js-regexp-string-iterator.h b/deps/v8/src/objects/js-regexp-string-iterator.h
index c5f2e33421..8991db82b5 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-regexp-string-iterator-tq.inc"
+
class JSRegExpStringIterator
: public TorqueGeneratedJSRegExpStringIterator<JSRegExpStringIterator,
JSObject> {
diff --git a/deps/v8/src/objects/js-regexp.cc b/deps/v8/src/objects/js-regexp.cc
index f0317a23f5..eb2bb1c432 100644
--- a/deps/v8/src/objects/js-regexp.cc
+++ b/deps/v8/src/objects/js-regexp.cc
@@ -171,13 +171,6 @@ uint32_t JSRegExp::BacktrackLimit() const {
// static
JSRegExp::Flags JSRegExp::FlagsFromString(Isolate* isolate,
Handle<String> flags, bool* success) {
- STATIC_ASSERT(*JSRegExp::FlagFromChar('g') == JSRegExp::kGlobal);
- STATIC_ASSERT(*JSRegExp::FlagFromChar('i') == JSRegExp::kIgnoreCase);
- STATIC_ASSERT(*JSRegExp::FlagFromChar('m') == JSRegExp::kMultiline);
- STATIC_ASSERT(*JSRegExp::FlagFromChar('s') == JSRegExp::kDotAll);
- STATIC_ASSERT(*JSRegExp::FlagFromChar('u') == JSRegExp::kUnicode);
- STATIC_ASSERT(*JSRegExp::FlagFromChar('y') == JSRegExp::kSticky);
-
int length = flags->length();
if (length == 0) {
*success = true;
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index f9618e5266..b1d1399eab 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-regexp-tq.inc"
+
// Regular expressions
// The regular expression holds a single reference to a FixedArray in
// the kDataOffset field.
@@ -40,8 +42,8 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
enum Type { NOT_COMPILED, ATOM, IRREGEXP, EXPERIMENTAL };
DEFINE_TORQUE_GENERATED_JS_REG_EXP_FLAGS()
- static constexpr base::Optional<Flag> FlagFromChar(char c) {
- STATIC_ASSERT(kFlagCount == 6);
+ static base::Optional<Flag> FlagFromChar(char c) {
+ STATIC_ASSERT(kFlagCount == 7);
// clang-format off
return c == 'g' ? base::Optional<Flag>(kGlobal)
: c == 'i' ? base::Optional<Flag>(kIgnoreCase)
@@ -49,6 +51,8 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
: c == 'y' ? base::Optional<Flag>(kSticky)
: c == 'u' ? base::Optional<Flag>(kUnicode)
: c == 's' ? base::Optional<Flag>(kDotAll)
+ : (FLAG_enable_experimental_regexp_engine && c == 'l')
+ ? base::Optional<Flag>(kLinear)
: base::Optional<Flag>();
// clang-format on
}
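
Two things happen in this hunk: kFlagCount grows from 6 to 7 for the new kLinear flag (surfaced to scripts as the 'l' RegExp flag when the experimental linear-time engine is enabled), and FlagFromChar loses its constexpr because the 'l' mapping now depends on the runtime flag FLAG_enable_experimental_regexp_engine. The gating shape in isolation (std::optional-based, illustrative names only):

#include <cassert>
#include <optional>

// Runtime switch standing in for FLAG_enable_experimental_regexp_engine.
bool enable_experimental_regexp_engine = false;

enum class Flag { kGlobal, kIgnoreCase, kLinear };

// Char-to-flag mapping where one entry is only available behind a runtime
// flag, which is why the real function can no longer be constexpr.
std::optional<Flag> FlagFromChar(char c) {
  if (c == 'g') return Flag::kGlobal;
  if (c == 'i') return Flag::kIgnoreCase;
  if (enable_experimental_regexp_engine && c == 'l') return Flag::kLinear;
  return std::nullopt;
}

int main() {
  assert(!FlagFromChar('l').has_value());   // rejected while the flag is off
  enable_experimental_regexp_engine = true;
  assert(FlagFromChar('l') == Flag::kLinear);
}
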
@@ -60,6 +64,7 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
STATIC_ASSERT(static_cast<int>(kSticky) == v8::RegExp::kSticky);
STATIC_ASSERT(static_cast<int>(kUnicode) == v8::RegExp::kUnicode);
STATIC_ASSERT(static_cast<int>(kDotAll) == v8::RegExp::kDotAll);
+ STATIC_ASSERT(static_cast<int>(kLinear) == v8::RegExp::kLinear);
STATIC_ASSERT(kFlagCount == v8::RegExp::kFlagCount);
DECL_ACCESSORS(last_index, Object)
diff --git a/deps/v8/src/objects/js-regexp.tq b/deps/v8/src/objects/js-regexp.tq
index 35e77114ba..6d3fc113cd 100644
--- a/deps/v8/src/objects/js-regexp.tq
+++ b/deps/v8/src/objects/js-regexp.tq
@@ -9,6 +9,7 @@ bitfield struct JSRegExpFlags extends uint31 {
sticky: bool: 1 bit;
unicode: bool: 1 bit;
dot_all: bool: 1 bit;
+ linear: bool: 1 bit;
}
@generateCppClass
diff --git a/deps/v8/src/objects/js-relative-time-format-inl.h b/deps/v8/src/objects/js-relative-time-format-inl.h
index 52d9d12261..4afdaa3088 100644
--- a/deps/v8/src/objects/js-relative-time-format-inl.h
+++ b/deps/v8/src/objects/js-relative-time-format-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-relative-time-format-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSRelativeTimeFormat)
// Base relative time format accessors.
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index 267343aaae..a2fab9ddc8 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -195,9 +195,12 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
}
}
- icu::DecimalFormat* decimal_format =
- static_cast<icu::DecimalFormat*>(number_format);
- decimal_format->setMinimumGroupingDigits(-2);
+ if (number_format->getDynamicClassID() ==
+ icu::DecimalFormat::getStaticClassID()) {
+ icu::DecimalFormat* decimal_format =
+ static_cast<icu::DecimalFormat*>(number_format);
+ decimal_format->setMinimumGroupingDigits(-2);
+ }
// Change UDISPCTX_CAPITALIZATION_NONE to other values if
// ECMA402 later include option to change capitalization.
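
The relative-time-format fix above guards the DecimalFormat downcast with ICU's class-ID check (getDynamicClassID() against DecimalFormat::getStaticClassID()), presumably because the NumberFormat obtained for some locales need not actually be a DecimalFormat, in which case the previous unconditional static_cast was invalid. The same guard pattern with a hypothetical hierarchy standing in for ICU:

#include <cassert>

// Hypothetical base/derived pair modeling ICU's class-ID scheme: each class
// exposes a static ID, and instances report their dynamic (most-derived) ID.
struct NumberFormatLike {
  virtual ~NumberFormatLike() = default;
  virtual const void* getDynamicClassID() const = 0;
};

struct DecimalFormatLike : NumberFormatLike {
  static const void* getStaticClassID() {
    static const char id = 0;
    return &id;
  }
  const void* getDynamicClassID() const override { return getStaticClassID(); }
  void setMinimumGroupingDigits(int) {}  // stand-in for the ICU setter
};

struct OtherFormatLike : NumberFormatLike {
  static const void* getStaticClassID() {
    static const char id = 0;
    return &id;
  }
  const void* getDynamicClassID() const override { return getStaticClassID(); }
};

// Only downcast when the dynamic class ID proves the object really is a
// DecimalFormatLike; otherwise skip the setter, as the patched code does.
void MaybeTuneGrouping(NumberFormatLike* fmt) {
  if (fmt->getDynamicClassID() == DecimalFormatLike::getStaticClassID()) {
    static_cast<DecimalFormatLike*>(fmt)->setMinimumGroupingDigits(-2);
  }
}

int main() {
  DecimalFormatLike decimal;
  OtherFormatLike other;
  MaybeTuneGrouping(&decimal);  // takes the cast path
  MaybeTuneGrouping(&other);    // safely skipped
}
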
diff --git a/deps/v8/src/objects/js-relative-time-format.h b/deps/v8/src/objects/js-relative-time-format.h
index 79e079b05b..444082cf0e 100644
--- a/deps/v8/src/objects/js-relative-time-format.h
+++ b/deps/v8/src/objects/js-relative-time-format.h
@@ -29,6 +29,8 @@ class RelativeDateTimeFormatter;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-relative-time-format-tq.inc"
+
class JSRelativeTimeFormat
: public TorqueGeneratedJSRelativeTimeFormat<JSRelativeTimeFormat,
JSObject> {
diff --git a/deps/v8/src/objects/js-relative-time-format.tq b/deps/v8/src/objects/js-relative-time-format.tq
new file mode 100644
index 0000000000..70b5e82245
--- /dev/null
+++ b/deps/v8/src/objects/js-relative-time-format.tq
@@ -0,0 +1,19 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-relative-time-format.h'
+
+type JSRelativeTimeFormatNumeric extends int32
+constexpr 'JSRelativeTimeFormat::Numeric';
+bitfield struct JSRelativeTimeFormatFlags extends uint31 {
+ numeric: JSRelativeTimeFormatNumeric: 1 bit;
+}
+
+@generateCppClass
+extern class JSRelativeTimeFormat extends JSObject {
+ locale: String;
+ numberingSystem: String;
+ icu_formatter: Foreign; // Managed<icu::RelativeDateTimeFormatter>
+ flags: SmiTagged<JSRelativeTimeFormatFlags>;
+}
diff --git a/deps/v8/src/objects/js-segment-iterator-inl.h b/deps/v8/src/objects/js-segment-iterator-inl.h
index e6a1c4a53d..979a1c796b 100644
--- a/deps/v8/src/objects/js-segment-iterator-inl.h
+++ b/deps/v8/src/objects/js-segment-iterator-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-segment-iterator-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSSegmentIterator)
// Base segment iterator accessors.
diff --git a/deps/v8/src/objects/js-segment-iterator.h b/deps/v8/src/objects/js-segment-iterator.h
index 45e03c06fa..bcbc22df37 100644
--- a/deps/v8/src/objects/js-segment-iterator.h
+++ b/deps/v8/src/objects/js-segment-iterator.h
@@ -27,6 +27,8 @@ class UnicodeString;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-segment-iterator-tq.inc"
+
class JSSegmentIterator
: public TorqueGeneratedJSSegmentIterator<JSSegmentIterator, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-segment-iterator.tq b/deps/v8/src/objects/js-segment-iterator.tq
new file mode 100644
index 0000000000..502070cefd
--- /dev/null
+++ b/deps/v8/src/objects/js-segment-iterator.tq
@@ -0,0 +1,16 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-segment-iterator.h'
+
+bitfield struct JSSegmentIteratorFlags extends uint31 {
+ granularity: JSSegmenterGranularity: 2 bit;
+}
+
+@generateCppClass
+extern class JSSegmentIterator extends JSObject {
+ icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
+ unicode_string: Foreign; // Managed<icu::UnicodeString>
+ flags: SmiTagged<JSSegmentIteratorFlags>;
+}
diff --git a/deps/v8/src/objects/js-segmenter-inl.h b/deps/v8/src/objects/js-segmenter-inl.h
index 98bc2e863b..e6744268c4 100644
--- a/deps/v8/src/objects/js-segmenter-inl.h
+++ b/deps/v8/src/objects/js-segmenter-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-segmenter-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSSegmenter)
// Base segmenter accessors.
diff --git a/deps/v8/src/objects/js-segmenter.h b/deps/v8/src/objects/js-segmenter.h
index e462042711..512625d204 100644
--- a/deps/v8/src/objects/js-segmenter.h
+++ b/deps/v8/src/objects/js-segmenter.h
@@ -28,6 +28,8 @@ class BreakIterator;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-segmenter-tq.inc"
+
class JSSegmenter : public TorqueGeneratedJSSegmenter<JSSegmenter, JSObject> {
public:
// Creates segmenter object with properties derived from input locales and
diff --git a/deps/v8/src/objects/js-segmenter.tq b/deps/v8/src/objects/js-segmenter.tq
new file mode 100644
index 0000000000..fdd888b428
--- /dev/null
+++ b/deps/v8/src/objects/js-segmenter.tq
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-segmenter.h'
+
+type JSSegmenterGranularity extends int32
+constexpr 'JSSegmenter::Granularity';
+bitfield struct JSSegmenterFlags extends uint31 {
+ granularity: JSSegmenterGranularity: 2 bit;
+}
+
+@generateCppClass
+extern class JSSegmenter extends JSObject {
+ locale: String;
+ icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
+ flags: SmiTagged<JSSegmenterFlags>;
+}
diff --git a/deps/v8/src/objects/js-segments-inl.h b/deps/v8/src/objects/js-segments-inl.h
index ceabd6741d..37fc4964e0 100644
--- a/deps/v8/src/objects/js-segments-inl.h
+++ b/deps/v8/src/objects/js-segments-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-segments-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSSegments)
// Base segments accessors.
diff --git a/deps/v8/src/objects/js-segments.h b/deps/v8/src/objects/js-segments.h
index b33323d6f9..30c387fea6 100644
--- a/deps/v8/src/objects/js-segments.h
+++ b/deps/v8/src/objects/js-segments.h
@@ -27,6 +27,8 @@ class UnicodeString;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-segments-tq.inc"
+
class JSSegments : public TorqueGeneratedJSSegments<JSSegments, JSObject> {
public:
// ecma402 #sec-createsegmentsobject
diff --git a/deps/v8/src/objects/js-segments.tq b/deps/v8/src/objects/js-segments.tq
new file mode 100644
index 0000000000..f891e26ca0
--- /dev/null
+++ b/deps/v8/src/objects/js-segments.tq
@@ -0,0 +1,16 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-segments.h'
+
+bitfield struct JSSegmentsFlags extends uint31 {
+ granularity: JSSegmenterGranularity: 2 bit;
+}
+
+@generateCppClass
+extern class JSSegments extends JSObject {
+ icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
+ unicode_string: Foreign; // Managed<icu::UnicodeString>
+ flags: SmiTagged<JSSegmentsFlags>;
+}
diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h
index 8b1bb6eaec..193544e1c2 100644
--- a/deps/v8/src/objects/js-weak-refs-inl.h
+++ b/deps/v8/src/objects/js-weak-refs-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-weak-refs-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(WeakCell)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakRef)
OBJECT_CONSTRUCTORS_IMPL(JSFinalizationRegistry, JSObject)
diff --git a/deps/v8/src/objects/js-weak-refs.h b/deps/v8/src/objects/js-weak-refs.h
index 2aa0a4ff2d..300673381a 100644
--- a/deps/v8/src/objects/js-weak-refs.h
+++ b/deps/v8/src/objects/js-weak-refs.h
@@ -17,6 +17,8 @@ namespace internal {
class NativeContext;
class WeakCell;
+#include "torque-generated/src/objects/js-weak-refs-tq.inc"
+
// FinalizationRegistry object from the JS Weak Refs spec proposal:
// https://github.com/tc39/proposal-weakrefs
class JSFinalizationRegistry : public JSObject {
diff --git a/deps/v8/src/objects/keys.cc b/deps/v8/src/objects/keys.cc
index ff6ea1fb57..ba5fa9c928 100644
--- a/deps/v8/src/objects/keys.cc
+++ b/deps/v8/src/objects/keys.cc
@@ -5,6 +5,7 @@
#include "src/objects/keys.h"
#include "src/api/api-arguments-inl.h"
+#include "src/common/globals.h"
#include "src/execution/isolate-inl.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
@@ -17,6 +18,7 @@
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/property-descriptor.h"
#include "src/objects/prototype.h"
+#include "src/objects/slots-atomic-inl.h"
#include "src/utils/identity-map.h"
#include "src/zone/zone-hashmap.h"
@@ -67,7 +69,8 @@ static Handle<FixedArray> CombineKeys(Isolate* isolate,
int nof_descriptors = map.NumberOfOwnDescriptors();
if (nof_descriptors == 0 && !may_have_elements) return prototype_chain_keys;
- Handle<DescriptorArray> descs(map.instance_descriptors(), isolate);
+ Handle<DescriptorArray> descs(map.instance_descriptors(kRelaxedLoad),
+ isolate);
int own_keys_length = own_keys.is_null() ? 0 : own_keys->length();
Handle<FixedArray> combined_keys = isolate->factory()->NewFixedArray(
own_keys_length + prototype_chain_keys_length);
@@ -369,8 +372,8 @@ Handle<FixedArray> ReduceFixedArrayTo(Isolate* isolate,
Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
Handle<JSObject> object) {
Handle<Map> map(object->map(), isolate);
- Handle<FixedArray> keys(map->instance_descriptors().enum_cache().keys(),
- isolate);
+ Handle<FixedArray> keys(
+ map->instance_descriptors(kRelaxedLoad).enum_cache().keys(), isolate);
// Check if the {map} has a valid enum length, which implies that it
// must have a valid enum cache as well.
@@ -395,7 +398,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
}
Handle<DescriptorArray> descriptors =
- Handle<DescriptorArray>(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray>(map->instance_descriptors(kRelaxedLoad), isolate);
isolate->counters()->enum_cache_misses()->Increment();
// Create the keys array.
@@ -651,14 +654,11 @@ bool FastKeyAccumulator::TryPrototypeInfoCache(Handle<JSReceiver> receiver) {
return true;
}
-namespace {
-
-enum IndexedOrNamed { kIndexed, kNamed };
-
-V8_WARN_UNUSED_RESULT ExceptionStatus FilterForEnumerableProperties(
+V8_WARN_UNUSED_RESULT ExceptionStatus
+KeyAccumulator::FilterForEnumerableProperties(
Handle<JSReceiver> receiver, Handle<JSObject> object,
- Handle<InterceptorInfo> interceptor, KeyAccumulator* accumulator,
- Handle<JSObject> result, IndexedOrNamed type) {
+ Handle<InterceptorInfo> interceptor, Handle<JSObject> result,
+ IndexedOrNamed type) {
DCHECK(result->IsJSArray() || result->HasSloppyArgumentsElements());
ElementsAccessor* accessor = result->GetElementsAccessor();
@@ -667,8 +667,8 @@ V8_WARN_UNUSED_RESULT ExceptionStatus FilterForEnumerableProperties(
if (!accessor->HasEntry(*result, entry)) continue;
// args are invalid after args.Call(), create a new one in every iteration.
- PropertyCallbackArguments args(accumulator->isolate(), interceptor->data(),
- *receiver, *object, Just(kDontThrow));
+ PropertyCallbackArguments args(isolate_, interceptor->data(), *receiver,
+ *object, Just(kDontThrow));
Handle<Object> element = accessor->Get(result, entry);
Handle<Object> attributes;
@@ -686,8 +686,7 @@ V8_WARN_UNUSED_RESULT ExceptionStatus FilterForEnumerableProperties(
int32_t value;
CHECK(attributes->ToInt32(&value));
if ((value & DONT_ENUM) == 0) {
- RETURN_FAILURE_IF_NOT_SUCCESSFUL(
- accumulator->AddKey(element, DO_NOT_CONVERT));
+ RETURN_FAILURE_IF_NOT_SUCCESSFUL(AddKey(element, DO_NOT_CONVERT));
}
}
}
@@ -695,17 +694,14 @@ V8_WARN_UNUSED_RESULT ExceptionStatus FilterForEnumerableProperties(
}
// Returns |true| on success, |nothing| on exception.
-Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
- Handle<JSObject> object,
- Handle<InterceptorInfo> interceptor,
- KeyAccumulator* accumulator,
- IndexedOrNamed type) {
- Isolate* isolate = accumulator->isolate();
- PropertyCallbackArguments enum_args(isolate, interceptor->data(), *receiver,
+Maybe<bool> KeyAccumulator::CollectInterceptorKeysInternal(
+ Handle<JSReceiver> receiver, Handle<JSObject> object,
+ Handle<InterceptorInfo> interceptor, IndexedOrNamed type) {
+ PropertyCallbackArguments enum_args(isolate_, interceptor->data(), *receiver,
*object, Just(kDontThrow));
Handle<JSObject> result;
- if (!interceptor->enumerator().IsUndefined(isolate)) {
+ if (!interceptor->enumerator().IsUndefined(isolate_)) {
if (type == kIndexed) {
result = enum_args.CallIndexedEnumerator(interceptor);
} else {
@@ -713,25 +709,23 @@ Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
result = enum_args.CallNamedEnumerator(interceptor);
}
}
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing<bool>());
if (result.is_null()) return Just(true);
- if ((accumulator->filter() & ONLY_ENUMERABLE) &&
- !interceptor->query().IsUndefined(isolate)) {
+ if ((filter_ & ONLY_ENUMERABLE) &&
+ !interceptor->query().IsUndefined(isolate_)) {
RETURN_NOTHING_IF_NOT_SUCCESSFUL(FilterForEnumerableProperties(
- receiver, object, interceptor, accumulator, result, type));
+ receiver, object, interceptor, result, type));
} else {
- RETURN_NOTHING_IF_NOT_SUCCESSFUL(accumulator->AddKeys(
+ RETURN_NOTHING_IF_NOT_SUCCESSFUL(AddKeys(
result, type == kIndexed ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT));
}
return Just(true);
}
-Maybe<bool> CollectInterceptorKeys(Handle<JSReceiver> receiver,
- Handle<JSObject> object,
- KeyAccumulator* accumulator,
- IndexedOrNamed type) {
- Isolate* isolate = accumulator->isolate();
+Maybe<bool> KeyAccumulator::CollectInterceptorKeys(Handle<JSReceiver> receiver,
+ Handle<JSObject> object,
+ IndexedOrNamed type) {
if (type == kIndexed) {
if (!object->HasIndexedInterceptor()) return Just(true);
} else {
@@ -740,17 +734,13 @@ Maybe<bool> CollectInterceptorKeys(Handle<JSReceiver> receiver,
Handle<InterceptorInfo> interceptor(type == kIndexed
? object->GetIndexedInterceptor()
: object->GetNamedInterceptor(),
- isolate);
- if ((accumulator->filter() & ONLY_ALL_CAN_READ) &&
- !interceptor->all_can_read()) {
+ isolate_);
+ if ((filter() & ONLY_ALL_CAN_READ) && !interceptor->all_can_read()) {
return Just(true);
}
- return CollectInterceptorKeysInternal(receiver, object, interceptor,
- accumulator, type);
+ return CollectInterceptorKeysInternal(receiver, object, interceptor, type);
}
-} // namespace
-
Maybe<bool> KeyAccumulator::CollectOwnElementIndices(
Handle<JSReceiver> receiver, Handle<JSObject> object) {
if (filter_ & SKIP_STRINGS || skip_indices_) return Just(true);
@@ -758,7 +748,7 @@ Maybe<bool> KeyAccumulator::CollectOwnElementIndices(
ElementsAccessor* accessor = object->GetElementsAccessor();
RETURN_NOTHING_IF_NOT_SUCCESSFUL(
accessor->CollectElementIndices(object, this));
- return CollectInterceptorKeys(receiver, object, this, kIndexed);
+ return CollectInterceptorKeys(receiver, object, kIndexed);
}
namespace {
@@ -810,6 +800,93 @@ base::Optional<int> CollectOwnPropertyNamesInternal(
return first_skipped;
}
+// Logic shared between different specializations of CopyEnumKeysTo.
+template <typename Dictionary>
+void CommonCopyEnumKeysTo(Isolate* isolate, Handle<Dictionary> dictionary,
+ Handle<FixedArray> storage, KeyCollectionMode mode,
+ KeyAccumulator* accumulator) {
+ DCHECK_IMPLIES(mode != KeyCollectionMode::kOwnOnly, accumulator != nullptr);
+ int length = storage->length();
+ int properties = 0;
+ ReadOnlyRoots roots(isolate);
+
+ AllowHeapAllocation allow_gc;
+ for (InternalIndex i : dictionary->IterateEntries()) {
+ Object key;
+ if (!dictionary->ToKey(roots, i, &key)) continue;
+ bool is_shadowing_key = false;
+ if (key.IsSymbol()) continue;
+ PropertyDetails details = dictionary->DetailsAt(i);
+ if (details.IsDontEnum()) {
+ if (mode == KeyCollectionMode::kIncludePrototypes) {
+ is_shadowing_key = true;
+ } else {
+ continue;
+ }
+ }
+ if (is_shadowing_key) {
+ // This might allocate, but {key} is not used afterwards.
+ accumulator->AddShadowingKey(key, &allow_gc);
+ continue;
+ } else {
+ if (Dictionary::kIsOrderedDictionaryType) {
+ storage->set(properties, dictionary->ValueAt(i));
+ } else {
+ // If the dictionary does not store elements in enumeration order,
+ // we need to sort it afterwards in CopyEnumKeysTo. To enable this we
+ // need to store indices at this point, rather than the values at the
+ // given indices.
+ storage->set(properties, Smi::FromInt(i.as_int()));
+ }
+ }
+ properties++;
+ if (mode == KeyCollectionMode::kOwnOnly && properties == length) break;
+ }
+
+ CHECK_EQ(length, properties);
+}
+
+// Copies enumerable keys to preallocated fixed array.
+// Does not throw for uninitialized exports in module namespace objects, so
+// this has to be checked separately.
+template <typename Dictionary>
+void CopyEnumKeysTo(Isolate* isolate, Handle<Dictionary> dictionary,
+ Handle<FixedArray> storage, KeyCollectionMode mode,
+ KeyAccumulator* accumulator) {
+ STATIC_ASSERT(!Dictionary::kIsOrderedDictionaryType);
+
+ CommonCopyEnumKeysTo<Dictionary>(isolate, dictionary, storage, mode,
+ accumulator);
+
+ int length = storage->length();
+
+ DisallowHeapAllocation no_gc;
+ Dictionary raw_dictionary = *dictionary;
+ FixedArray raw_storage = *storage;
+ EnumIndexComparator<Dictionary> cmp(raw_dictionary);
+ // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
+ // store operations that are safe for concurrent marking.
+ AtomicSlot start(storage->GetFirstElementAddress());
+ std::sort(start, start + length, cmp);
+ for (int i = 0; i < length; i++) {
+ InternalIndex index(Smi::ToInt(raw_storage.get(i)));
+ raw_storage.set(i, raw_dictionary.NameAt(index));
+ }
+}
+
+template <>
+void CopyEnumKeysTo(Isolate* isolate, Handle<OrderedNameDictionary> dictionary,
+ Handle<FixedArray> storage, KeyCollectionMode mode,
+ KeyAccumulator* accumulator) {
+ CommonCopyEnumKeysTo<OrderedNameDictionary>(isolate, dictionary, storage,
+ mode, accumulator);
+
+ // No need to sort: CommonCopyEnumKeysTo on OrderedNameDictionary adds
+ // entries to |storage| in the dictionary's insertion order. Further, since
+ // Dictionary::kIsOrderedDictionaryType is true here, |storage| already
+ // contains the actual values from |dictionary|, rather than indices.
+}
+
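
The pair of functions above is the enumeration-cache half of the ordered/unordered split: the generic CopyEnumKeysTo stores Smi indices and then sorts them with EnumIndexComparator (going through AtomicSlot so that std::sort's loads and stores stay safe under concurrent marking), while the OrderedNameDictionary specialization skips the sort because CommonCopyEnumKeysTo already visited entries in insertion order. A compact illustration of dispatching on such a static trait (plain C++17 with hypothetical types; V8 keys the branch on Dictionary::kIsOrderedDictionaryType):

#include <algorithm>
#include <cassert>
#include <string>
#include <utility>
#include <vector>

// Two hypothetical dictionaries exposing the same static trait the V8 code
// keys on.
struct UnorderedDictLike {
  static constexpr bool kIsOrderedDictionaryType = false;
  // entry -> {name, enumeration index}; storage order is arbitrary here.
  std::vector<std::pair<std::string, int>> entries{{"b", 2}, {"a", 1}};
};
struct OrderedDictLike {
  static constexpr bool kIsOrderedDictionaryType = true;
  std::vector<std::pair<std::string, int>> entries{{"a", 1}, {"b", 2}};
};

// Copy names in enumeration order: sort only when the dictionary itself does
// not already store entries in that order.
template <typename Dict>
std::vector<std::string> CopyEnumKeys(const Dict& dict) {
  auto entries = dict.entries;
  if constexpr (!Dict::kIsOrderedDictionaryType) {
    std::sort(entries.begin(), entries.end(),
              [](const auto& l, const auto& r) { return l.second < r.second; });
  }
  std::vector<std::string> keys;
  for (const auto& e : entries) keys.push_back(e.first);
  return keys;
}

int main() {
  assert(CopyEnumKeys(UnorderedDictLike{}) ==
         (std::vector<std::string>{"a", "b"}));
  assert(CopyEnumKeys(OrderedDictLike{}) ==
         (std::vector<std::string>{"a", "b"}));
}
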
template <class T>
Handle<FixedArray> GetOwnEnumPropertyDictionaryKeys(Isolate* isolate,
KeyCollectionMode mode,
@@ -822,9 +899,83 @@ Handle<FixedArray> GetOwnEnumPropertyDictionaryKeys(Isolate* isolate,
}
int length = dictionary->NumberOfEnumerableProperties();
Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
- T::CopyEnumKeysTo(isolate, dictionary, storage, mode, accumulator);
+ CopyEnumKeysTo(isolate, dictionary, storage, mode, accumulator);
return storage;
}
+
+// Collect the keys from |dictionary| into |keys|, in ascending chronological
+// order of property creation.
+template <typename Dictionary>
+ExceptionStatus CollectKeysFromDictionary(Handle<Dictionary> dictionary,
+ KeyAccumulator* keys) {
+ Isolate* isolate = keys->isolate();
+ ReadOnlyRoots roots(isolate);
+ // TODO(jkummerow): Consider using a std::unique_ptr<InternalIndex[]> instead.
+ Handle<FixedArray> array =
+ isolate->factory()->NewFixedArray(dictionary->NumberOfElements());
+ int array_size = 0;
+ PropertyFilter filter = keys->filter();
+ // Handle enumerable strings in CopyEnumKeysTo.
+ DCHECK_NE(keys->filter(), ENUMERABLE_STRINGS);
+ {
+ DisallowHeapAllocation no_gc;
+ for (InternalIndex i : dictionary->IterateEntries()) {
+ Object key;
+ Dictionary raw_dictionary = *dictionary;
+ if (!raw_dictionary.ToKey(roots, i, &key)) continue;
+ if (key.FilterKey(filter)) continue;
+ PropertyDetails details = raw_dictionary.DetailsAt(i);
+ if ((details.attributes() & filter) != 0) {
+ AllowHeapAllocation gc;
+ // This might allocate, but {key} is not used afterwards.
+ keys->AddShadowingKey(key, &gc);
+ continue;
+ }
+ if (filter & ONLY_ALL_CAN_READ) {
+ if (details.kind() != kAccessor) continue;
+ Object accessors = raw_dictionary.ValueAt(i);
+ if (!accessors.IsAccessorInfo()) continue;
+ if (!AccessorInfo::cast(accessors).all_can_read()) continue;
+ }
+ // TODO(emrich): consider storing keys instead of indices into the array
+ // in case of ordered dictionary type.
+ array->set(array_size++, Smi::FromInt(i.as_int()));
+ }
+ if (!Dictionary::kIsOrderedDictionaryType) {
+ // Sorting only needed if it's an unordered dictionary,
+ // otherwise we traversed elements in insertion order
+
+ EnumIndexComparator<Dictionary> cmp(*dictionary);
+ // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
+ // store operations that are safe for concurrent marking.
+ AtomicSlot start(array->GetFirstElementAddress());
+ std::sort(start, start + array_size, cmp);
+ }
+ }
+
+ bool has_seen_symbol = false;
+ for (int i = 0; i < array_size; i++) {
+ InternalIndex index(Smi::ToInt(array->get(i)));
+ Object key = dictionary->NameAt(index);
+ if (key.IsSymbol()) {
+ has_seen_symbol = true;
+ continue;
+ }
+ ExceptionStatus status = keys->AddKey(key, DO_NOT_CONVERT);
+ if (!status) return status;
+ }
+ if (has_seen_symbol) {
+ for (int i = 0; i < array_size; i++) {
+ InternalIndex index(Smi::ToInt(array->get(i)));
+ Object key = dictionary->NameAt(index);
+ if (!key.IsSymbol()) continue;
+ ExceptionStatus status = keys->AddKey(key, DO_NOT_CONVERT);
+ if (!status) return status;
+ }
+ }
+ return ExceptionStatus::kSuccess;
+}
+
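
CollectKeysFromDictionary walks the sorted index array twice, strings first and symbols second, so the emitted order matches the own-property-key ordering JavaScript requires (string keys in creation order, followed by symbol keys in creation order). The same two-pass partition in miniature:

#include <cassert>
#include <string>
#include <vector>

// A key is either a string name or a symbol; real V8 keys are heap objects,
// this struct is just for illustration.
struct Key {
  std::string name;
  bool is_symbol;
};

// Emit all string keys first, then all symbol keys, preserving the relative
// (creation) order within each group -- the same two passes as above.
std::vector<std::string> OrderKeys(const std::vector<Key>& keys) {
  std::vector<std::string> out;
  for (const Key& k : keys)
    if (!k.is_symbol) out.push_back(k.name);
  for (const Key& k : keys)
    if (k.is_symbol) out.push_back(k.name);
  return out;
}

int main() {
  std::vector<Key> keys{{"a", false}, {"@@sym1", true}, {"b", false}};
  assert(OrderKeys(keys) == (std::vector<std::string>{"a", "b", "@@sym1"}));
}
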
} // namespace
Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
@@ -840,8 +991,8 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
if (enum_keys->length() != nof_descriptors) {
if (map.prototype(isolate_) != ReadOnlyRoots(isolate_).null_value()) {
AllowHeapAllocation allow_gc;
- Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(map.instance_descriptors(), isolate_);
+ Handle<DescriptorArray> descs = Handle<DescriptorArray>(
+ map.instance_descriptors(kRelaxedLoad), isolate_);
for (InternalIndex i : InternalIndex::Range(nof_descriptors)) {
PropertyDetails details = descs->GetDetails(i);
if (!details.IsDontEnum()) continue;
@@ -853,6 +1004,9 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
enum_keys = GetOwnEnumPropertyDictionaryKeys(
isolate_, mode_, this, object,
JSGlobalObject::cast(*object).global_dictionary());
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ enum_keys = GetOwnEnumPropertyDictionaryKeys(
+ isolate_, mode_, this, object, object->property_dictionary_ordered());
} else {
enum_keys = GetOwnEnumPropertyDictionaryKeys(
isolate_, mode_, this, object, object->property_dictionary());
@@ -873,8 +1027,8 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
} else {
if (object->HasFastProperties()) {
int limit = object->map().NumberOfOwnDescriptors();
- Handle<DescriptorArray> descs(object->map().instance_descriptors(),
- isolate_);
+ Handle<DescriptorArray> descs(
+ object->map().instance_descriptors(kRelaxedLoad), isolate_);
// First collect the strings,
base::Optional<int> first_symbol =
CollectOwnPropertyNamesInternal<true>(object, this, descs, 0, limit);
@@ -885,16 +1039,19 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
object, this, descs, first_symbol.value(), limit));
}
} else if (object->IsJSGlobalObject()) {
- RETURN_NOTHING_IF_NOT_SUCCESSFUL(GlobalDictionary::CollectKeysTo(
+ RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
handle(JSGlobalObject::cast(*object).global_dictionary(), isolate_),
this));
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
+ handle(object->property_dictionary_ordered(), isolate_), this));
} else {
- RETURN_NOTHING_IF_NOT_SUCCESSFUL(NameDictionary::CollectKeysTo(
+ RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
handle(object->property_dictionary(), isolate_), this));
}
}
// Add the property keys from the interceptor.
- return CollectInterceptorKeys(receiver, object, this, kNamed);
+ return CollectInterceptorKeys(receiver, object, kNamed);
}
ExceptionStatus KeyAccumulator::CollectPrivateNames(Handle<JSReceiver> receiver,
@@ -902,15 +1059,18 @@ ExceptionStatus KeyAccumulator::CollectPrivateNames(Handle<JSReceiver> receiver,
DCHECK_EQ(mode_, KeyCollectionMode::kOwnOnly);
if (object->HasFastProperties()) {
int limit = object->map().NumberOfOwnDescriptors();
- Handle<DescriptorArray> descs(object->map().instance_descriptors(),
- isolate_);
+ Handle<DescriptorArray> descs(
+ object->map().instance_descriptors(kRelaxedLoad), isolate_);
CollectOwnPropertyNamesInternal<false>(object, this, descs, 0, limit);
} else if (object->IsJSGlobalObject()) {
- RETURN_FAILURE_IF_NOT_SUCCESSFUL(GlobalDictionary::CollectKeysTo(
+ RETURN_FAILURE_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
handle(JSGlobalObject::cast(*object).global_dictionary(), isolate_),
this));
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ RETURN_FAILURE_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
+ handle(object->property_dictionary_ordered(), isolate_), this));
} else {
- RETURN_FAILURE_IF_NOT_SUCCESSFUL(NameDictionary::CollectKeysTo(
+ RETURN_FAILURE_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
handle(object->property_dictionary(), isolate_), this));
}
return ExceptionStatus::kSuccess;
@@ -925,7 +1085,7 @@ Maybe<bool> KeyAccumulator::CollectAccessCheckInterceptorKeys(
handle(InterceptorInfo::cast(
access_check_info->indexed_interceptor()),
isolate_),
- this, kIndexed)),
+ kIndexed)),
Nothing<bool>());
}
MAYBE_RETURN(
@@ -933,7 +1093,7 @@ Maybe<bool> KeyAccumulator::CollectAccessCheckInterceptorKeys(
receiver, object,
handle(InterceptorInfo::cast(access_check_info->named_interceptor()),
isolate_),
- this, kNamed)),
+ kNamed)),
Nothing<bool>());
return Just(true);
}
@@ -991,6 +1151,10 @@ Handle<FixedArray> KeyAccumulator::GetOwnEnumPropertyKeys(
return GetOwnEnumPropertyDictionaryKeys(
isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
JSGlobalObject::cast(*object).global_dictionary());
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ return GetOwnEnumPropertyDictionaryKeys(
+ isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
+ object->property_dictionary_ordered());
} else {
return GetOwnEnumPropertyDictionaryKeys(
isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
@@ -1021,8 +1185,13 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
Handle<JSProxy> proxy) {
STACK_CHECK(isolate_, Nothing<bool>());
if (filter_ == PRIVATE_NAMES_ONLY) {
- RETURN_NOTHING_IF_NOT_SUCCESSFUL(NameDictionary::CollectKeysTo(
- handle(proxy->property_dictionary(), isolate_), this));
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
+ handle(proxy->property_dictionary_ordered(), isolate_), this));
+ } else {
+ RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
+ handle(proxy->property_dictionary(), isolate_), this));
+ }
return Just(true);
}
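
The keys.cc hunks above repeatedly branch on V8_DICT_MODE_PROTOTYPES_BOOL and funnel both dictionary backends through a single CollectKeysFromDictionary helper instead of calling each dictionary's CollectKeysTo directly. A minimal standalone sketch of that dispatch shape, using ordinary standard-library containers as hypothetical stand-ins for V8's NameDictionary and OrderedNameDictionary (none of the names below are the real V8 types):

#include <iostream>
#include <map>
#include <string>
#include <unordered_map>
#include <vector>

// Stand-in for V8_DICT_MODE_PROTOTYPES_BOOL: selects the ordered dictionary backend.
constexpr bool kDictModePrototypes = false;

// One templated helper serves any dictionary that iterates over key/value pairs,
// mirroring how the patch routes both backends through CollectKeysFromDictionary.
template <typename Dictionary>
void CollectKeysFromDictionary(const Dictionary& dict,
                               std::vector<std::string>* out) {
  for (const auto& entry : dict) out->push_back(entry.first);
}

int main() {
  std::vector<std::string> keys;
  if (kDictModePrototypes) {
    std::map<std::string, int> ordered{{"a", 1}, {"b", 2}};  // "ordered" stand-in
    CollectKeysFromDictionary(ordered, &keys);
  } else {
    std::unordered_map<std::string, int> unordered{{"a", 1}, {"b", 2}};
    CollectKeysFromDictionary(unordered, &keys);
  }
  for (const auto& k : keys) std::cout << k << "\n";
}
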
diff --git a/deps/v8/src/objects/keys.h b/deps/v8/src/objects/keys.h
index d0c27b2a4d..92b1fd783e 100644
--- a/deps/v8/src/objects/keys.h
+++ b/deps/v8/src/objects/keys.h
@@ -13,6 +13,7 @@ namespace v8 {
namespace internal {
class JSProxy;
+class FastKeyAccumulator;
enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX };
@@ -38,6 +39,8 @@ class KeyAccumulator final {
PropertyFilter filter)
: isolate_(isolate), mode_(mode), filter_(filter) {}
~KeyAccumulator() = default;
+ KeyAccumulator(const KeyAccumulator&) = delete;
+ KeyAccumulator& operator=(const KeyAccumulator&) = delete;
static MaybeHandle<FixedArray> GetKeys(
Handle<JSReceiver> object, KeyCollectionMode mode, PropertyFilter filter,
@@ -48,15 +51,6 @@ class KeyAccumulator final {
GetKeysConversion convert = GetKeysConversion::kKeepNumbers);
Maybe<bool> CollectKeys(Handle<JSReceiver> receiver,
Handle<JSReceiver> object);
- Maybe<bool> CollectOwnElementIndices(Handle<JSReceiver> receiver,
- Handle<JSObject> object);
- Maybe<bool> CollectOwnPropertyNames(Handle<JSReceiver> receiver,
- Handle<JSObject> object);
- V8_WARN_UNUSED_RESULT ExceptionStatus
- CollectPrivateNames(Handle<JSReceiver> receiver, Handle<JSObject> object);
- Maybe<bool> CollectAccessCheckInterceptorKeys(
- Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
- Handle<JSObject> object);
// Might return directly the object's enum_cache, copy the result before using
// as an elements backing store for a JSObject.
@@ -69,10 +63,6 @@ class KeyAccumulator final {
AddKey(Object key, AddKeyConversion convert = DO_NOT_CONVERT);
V8_WARN_UNUSED_RESULT ExceptionStatus
AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
- V8_WARN_UNUSED_RESULT ExceptionStatus AddKeys(Handle<FixedArray> array,
- AddKeyConversion convert);
- V8_WARN_UNUSED_RESULT ExceptionStatus AddKeys(Handle<JSObject> array_like,
- AddKeyConversion convert);
// Jump to the next level, pushing the current |levelLength_| to
// |levelLengths_| and adding a new list to |elements_|.
@@ -82,43 +72,74 @@ class KeyAccumulator final {
// The collection mode defines whether we collect the keys from the prototype
// chain or only look at the receiver.
KeyCollectionMode mode() { return mode_; }
- // In case of for-in loops we have to treat JSProxy keys differently and
- // deduplicate them. Additionally we convert JSProxy keys back to array
- // indices.
- void set_is_for_in(bool value) { is_for_in_ = value; }
void set_skip_indices(bool value) { skip_indices_ = value; }
- void set_first_prototype_map(Handle<Map> value) {
- first_prototype_map_ = value;
- }
- void set_try_prototype_info_cache(bool value) {
- try_prototype_info_cache_ = value;
- }
- void set_receiver(Handle<JSReceiver> object) { receiver_ = object; }
- // The last_non_empty_prototype is used to limit the prototypes for which
- // we have to keep track of non-enumerable keys that can shadow keys
- // repeated on the prototype chain.
- void set_last_non_empty_prototype(Handle<JSReceiver> object) {
- last_non_empty_prototype_ = object;
- }
- void set_may_have_elements(bool value) { may_have_elements_ = value; }
// Shadowing keys are used to filter keys. This happens when non-enumerable
// keys appear again on the prototype chain.
void AddShadowingKey(Object key, AllowHeapAllocation* allow_gc);
void AddShadowingKey(Handle<Object> key);
private:
+ enum IndexedOrNamed { kIndexed, kNamed };
+
+ V8_WARN_UNUSED_RESULT ExceptionStatus
+ CollectPrivateNames(Handle<JSReceiver> receiver, Handle<JSObject> object);
+ Maybe<bool> CollectAccessCheckInterceptorKeys(
+ Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
+
+ Maybe<bool> CollectInterceptorKeysInternal(
+ Handle<JSReceiver> receiver, Handle<JSObject> object,
+ Handle<InterceptorInfo> interceptor, IndexedOrNamed type);
+ Maybe<bool> CollectInterceptorKeys(Handle<JSReceiver> receiver,
+ Handle<JSObject> object,
+ IndexedOrNamed type);
+
+ Maybe<bool> CollectOwnElementIndices(Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
+ Maybe<bool> CollectOwnPropertyNames(Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
Maybe<bool> CollectOwnKeys(Handle<JSReceiver> receiver,
Handle<JSObject> object);
Maybe<bool> CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
Handle<JSProxy> proxy);
Maybe<bool> CollectOwnJSProxyTargetKeys(Handle<JSProxy> proxy,
Handle<JSReceiver> target);
+
+ V8_WARN_UNUSED_RESULT ExceptionStatus FilterForEnumerableProperties(
+ Handle<JSReceiver> receiver, Handle<JSObject> object,
+ Handle<InterceptorInfo> interceptor, Handle<JSObject> result,
+ IndexedOrNamed type);
+
Maybe<bool> AddKeysFromJSProxy(Handle<JSProxy> proxy,
Handle<FixedArray> keys);
+ V8_WARN_UNUSED_RESULT ExceptionStatus AddKeys(Handle<FixedArray> array,
+ AddKeyConversion convert);
+ V8_WARN_UNUSED_RESULT ExceptionStatus AddKeys(Handle<JSObject> array_like,
+ AddKeyConversion convert);
+
bool IsShadowed(Handle<Object> key);
bool HasShadowingKeys();
Handle<OrderedHashSet> keys();
+ // In case of for-in loops we have to treat JSProxy keys differently and
+ // deduplicate them. Additionally we convert JSProxy keys back to array
+ // indices.
+ void set_is_for_in(bool value) { is_for_in_ = value; }
+ void set_first_prototype_map(Handle<Map> value) {
+ first_prototype_map_ = value;
+ }
+ void set_try_prototype_info_cache(bool value) {
+ try_prototype_info_cache_ = value;
+ }
+ void set_receiver(Handle<JSReceiver> object) { receiver_ = object; }
+ // The last_non_empty_prototype is used to limit the prototypes for which
+ // we have to keep track of non-enumerable keys that can shadow keys
+ // repeated on the prototype chain.
+ void set_last_non_empty_prototype(Handle<JSReceiver> object) {
+ last_non_empty_prototype_ = object;
+ }
+ void set_may_have_elements(bool value) { may_have_elements_ = value; }
+
Isolate* isolate_;
// keys_ is either an Handle<OrderedHashSet> or in the case of own JSProxy
// keys a Handle<FixedArray>. The OrderedHashSet is in-place converted to the
@@ -138,7 +159,7 @@ class KeyAccumulator final {
bool may_have_elements_ = true;
bool try_prototype_info_cache_ = false;
- DISALLOW_COPY_AND_ASSIGN(KeyAccumulator);
+ friend FastKeyAccumulator;
};
// The FastKeyAccumulator handles the cases where there are no elements on the
@@ -158,6 +179,8 @@ class FastKeyAccumulator {
skip_indices_(skip_indices) {
Prepare();
}
+ FastKeyAccumulator(const FastKeyAccumulator&) = delete;
+ FastKeyAccumulator& operator=(const FastKeyAccumulator&) = delete;
bool is_receiver_simple_enum() { return is_receiver_simple_enum_; }
bool has_empty_prototype() { return has_empty_prototype_; }
@@ -193,8 +216,6 @@ class FastKeyAccumulator {
bool has_prototype_info_cache_ = false;
bool try_prototype_info_cache_ = false;
bool only_own_has_simple_elements_ = false;
-
- DISALLOW_COPY_AND_ASSIGN(FastKeyAccumulator);
};
} // namespace internal
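
The keys.h hunk above replaces the DISALLOW_COPY_AND_ASSIGN macro with explicitly deleted copy members declared up front in the class. A small sketch of the effect, assuming nothing beyond standard C++ (the class name here is illustrative only):

#include <type_traits>

// New style, as in the KeyAccumulator/FastKeyAccumulator hunks: spell out the deleted
// copy constructor and copy assignment instead of relying on a macro in the tail of
// the private section.
class Accumulator {
 public:
  Accumulator() = default;
  Accumulator(const Accumulator&) = delete;
  Accumulator& operator=(const Accumulator&) = delete;
};

static_assert(!std::is_copy_constructible<Accumulator>::value,
              "copying is disabled, matching the old DISALLOW_COPY_AND_ASSIGN");
static_assert(!std::is_copy_assignable<Accumulator>::value,
              "assignment is disabled as well");

int main() {
  Accumulator a;  // default construction still works; copies no longer compile
  (void)a;
}
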
diff --git a/deps/v8/src/objects/layout-descriptor-inl.h b/deps/v8/src/objects/layout-descriptor-inl.h
index 561e79505e..76dd3f618b 100644
--- a/deps/v8/src/objects/layout-descriptor-inl.h
+++ b/deps/v8/src/objects/layout-descriptor-inl.h
@@ -175,11 +175,12 @@ int LayoutDescriptor::CalculateCapacity(Map map, DescriptorArray descriptors,
if (!InobjectUnboxedField(inobject_properties, details)) continue;
int field_index = details.field_index();
int field_width_in_words = details.field_width_in_words();
- layout_descriptor_length =
- Max(layout_descriptor_length, field_index + field_width_in_words);
+ layout_descriptor_length = std::max(layout_descriptor_length,
+ field_index + field_width_in_words);
}
}
- layout_descriptor_length = Min(layout_descriptor_length, inobject_properties);
+ layout_descriptor_length =
+ std::min(layout_descriptor_length, inobject_properties);
return layout_descriptor_length;
}
diff --git a/deps/v8/src/objects/layout-descriptor.cc b/deps/v8/src/objects/layout-descriptor.cc
index 2b588a58bf..034680e297 100644
--- a/deps/v8/src/objects/layout-descriptor.cc
+++ b/deps/v8/src/objects/layout-descriptor.cc
@@ -65,7 +65,7 @@ Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
Isolate* isolate, Handle<Map> map, PropertyDetails details,
Handle<LayoutDescriptor> full_layout_descriptor) {
DisallowHeapAllocation no_allocation;
- LayoutDescriptor layout_descriptor = map->layout_descriptor();
+ LayoutDescriptor layout_descriptor = map->layout_descriptor(kAcquireLoad);
if (layout_descriptor.IsSlowLayout()) {
return full_layout_descriptor;
}
@@ -164,8 +164,8 @@ bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
}
}
} else { // Fast layout.
- sequence_length = Min(base::bits::CountTrailingZeros(value),
- static_cast<unsigned>(kBitsInSmiLayout)) -
+ sequence_length = std::min(base::bits::CountTrailingZeros(value),
+ static_cast<unsigned>(kBitsInSmiLayout)) -
layout_bit_index;
if (is_tagged && (field_index + sequence_length == capacity())) {
// The contiguous sequence of tagged fields lasts till the end of the
@@ -174,7 +174,7 @@ bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
sequence_length = std::numeric_limits<int>::max();
}
}
- *out_sequence_length = Min(sequence_length, max_sequence_length);
+ *out_sequence_length = std::min(sequence_length, max_sequence_length);
return is_tagged;
}
@@ -200,7 +200,7 @@ bool LayoutDescriptorHelper::IsTagged(
return true;
}
int max_sequence_length = (end_offset - offset_in_bytes) / kTaggedSize;
- int field_index = Max(0, (offset_in_bytes - header_size_) / kTaggedSize);
+ int field_index = std::max(0, (offset_in_bytes - header_size_) / kTaggedSize);
int sequence_length;
bool tagged = layout_descriptor_.IsTagged(field_index, max_sequence_length,
&sequence_length);
@@ -257,7 +257,7 @@ LayoutDescriptor LayoutDescriptor::Trim(Heap* heap, Map map,
bool LayoutDescriptor::IsConsistentWithMap(Map map, bool check_tail) {
if (FLAG_unbox_double_fields) {
- DescriptorArray descriptors = map.instance_descriptors();
+ DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
int last_field_index = 0;
for (InternalIndex i : map.IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
@@ -271,8 +271,8 @@ bool LayoutDescriptor::IsConsistentWithMap(Map map, bool check_tail) {
if (tagged_actual != tagged_expected) return false;
}
last_field_index =
- Max(last_field_index,
- details.field_index() + details.field_width_in_words());
+ std::max(last_field_index,
+ details.field_index() + details.field_width_in_words());
}
if (check_tail) {
int n = capacity();
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index d7b0185f7b..8b08dedb72 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/literal-objects-tq-inl.inc"
+
//
// ObjectBoilerplateDescription
//
@@ -27,11 +29,11 @@ SMI_ACCESSORS(ObjectBoilerplateDescription, flags,
FixedArray::OffsetOfElementAt(kLiteralTypeOffset))
Object ObjectBoilerplateDescription::name(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return name(isolate, index);
}
-Object ObjectBoilerplateDescription::name(const Isolate* isolate,
+Object ObjectBoilerplateDescription::name(IsolateRoot isolate,
int index) const {
// get() already checks for out of bounds access, but we do not want to allow
// access to the last element, if it is the number of properties.
@@ -40,11 +42,11 @@ Object ObjectBoilerplateDescription::name(const Isolate* isolate,
}
Object ObjectBoilerplateDescription::value(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return value(isolate, index);
}
-Object ObjectBoilerplateDescription::value(const Isolate* isolate,
+Object ObjectBoilerplateDescription::value(IsolateRoot isolate,
int index) const {
return get(isolate, 2 * index + 1 + kDescriptionStartIndex);
}
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index b5cdfd2795..365eb6ba9e 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -119,8 +119,9 @@ constexpr int ComputeEnumerationIndex(int value_index) {
// We "shift" value indices to ensure that the enumeration index for the value
// will not overlap with minimum properties set for both class and prototype
// objects.
- return value_index + Max(ClassBoilerplate::kMinimumClassPropertiesCount,
- ClassBoilerplate::kMinimumPrototypePropertiesCount);
+ return value_index +
+ std::max({ClassBoilerplate::kMinimumClassPropertiesCount,
+ ClassBoilerplate::kMinimumPrototypePropertiesCount});
}
inline int GetExistingValueIndex(Object value) {
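
Several hunks in the two layout-descriptor files and in literal-objects.cc migrate V8's Min/Max helpers to std::min/std::max, in a couple of places via the brace-list overload. A compilable sketch of both spellings, with placeholder values standing in for the real fields:

#include <algorithm>
#include <iostream>

int main() {
  int layout_descriptor_length = 3;
  const int field_index = 5, field_width_in_words = 2, inobject_properties = 6;
  // Same shape as the hunks above: the two-argument forms deduce a single type T.
  layout_descriptor_length =
      std::max(layout_descriptor_length, field_index + field_width_in_words);
  layout_descriptor_length =
      std::min(layout_descriptor_length, inobject_properties);
  // The brace-list overload used elsewhere in this patch takes its arguments by value
  // through an initializer_list rather than binding const references to them.
  int limit = std::max({4, inobject_properties});
  std::cout << layout_descriptor_length << " " << limit << "\n";
}
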
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index 6603a9fad8..2ea5a521c5 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -17,6 +17,8 @@ namespace internal {
class ClassLiteral;
+#include "torque-generated/src/objects/literal-objects-tq.inc"
+
// ObjectBoilerplateDescription is a list of properties consisting of name value
// pairs. In addition to the properties, it provides the projected number
// of properties in the backing store. This number includes properties with
@@ -26,10 +28,10 @@ class ClassLiteral;
class ObjectBoilerplateDescription : public FixedArray {
public:
inline Object name(int index) const;
- inline Object name(const Isolate* isolate, int index) const;
+ inline Object name(IsolateRoot isolate, int index) const;
inline Object value(int index) const;
- inline Object value(const Isolate* isolate, int index) const;
+ inline Object value(IsolateRoot isolate, int index) const;
inline void set_key_value(int index, Object key, Object value);
diff --git a/deps/v8/src/objects/lookup-cache.h b/deps/v8/src/objects/lookup-cache.h
index a2016d23df..4aa3c5a588 100644
--- a/deps/v8/src/objects/lookup-cache.h
+++ b/deps/v8/src/objects/lookup-cache.h
@@ -18,6 +18,8 @@ namespace internal {
// Cleared at startup and prior to any gc.
class DescriptorLookupCache {
public:
+ DescriptorLookupCache(const DescriptorLookupCache&) = delete;
+ DescriptorLookupCache& operator=(const DescriptorLookupCache&) = delete;
// Lookup descriptor index for (map, name).
// If absent, kAbsent is returned.
inline int Lookup(Map source, Name name);
@@ -51,7 +53,6 @@ class DescriptorLookupCache {
int results_[kLength];
friend class Isolate;
- DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};
} // namespace internal
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index 25f2d254df..da7a4740ae 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -4,18 +4,19 @@
#include "src/objects/lookup.h"
+#include "src/common/globals.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/protectors-inl.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/elements.h"
#include "src/objects/field-type.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/ordered-hash-table.h"
#include "src/objects/struct-inl.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
@@ -437,8 +438,9 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
if (old_map.is_identical_to(new_map)) {
// Update the property details if the representation was None.
if (constness() != new_constness || representation().IsNone()) {
- property_details_ = new_map->instance_descriptors(isolate_).GetDetails(
- descriptor_number());
+ property_details_ =
+ new_map->instance_descriptors(isolate_, kRelaxedLoad)
+ .GetDetails(descriptor_number());
}
return;
}
@@ -510,15 +512,24 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
cell->set_value(*value);
property_details_ = cell->property_details();
} else {
- Handle<NameDictionary> dictionary(
- holder_obj->property_dictionary(isolate_), isolate());
- PropertyDetails original_details =
- dictionary->DetailsAt(dictionary_entry());
- int enumeration_index = original_details.dictionary_index();
- DCHECK_GT(enumeration_index, 0);
- details = details.set_index(enumeration_index);
- dictionary->SetEntry(dictionary_entry(), *name(), *value, details);
- property_details_ = details;
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ Handle<OrderedNameDictionary> dictionary(
+ holder_obj->property_dictionary_ordered(isolate_), isolate());
+ dictionary->SetEntry(dictionary_entry(), *name(), *value, details);
+ DCHECK_EQ(details.AsSmi(),
+ dictionary->DetailsAt(dictionary_entry()).AsSmi());
+ property_details_ = details;
+ } else {
+ Handle<NameDictionary> dictionary(
+ holder_obj->property_dictionary(isolate_), isolate());
+ PropertyDetails original_details =
+ dictionary->DetailsAt(dictionary_entry());
+ int enumeration_index = original_details.dictionary_index();
+ DCHECK_GT(enumeration_index, 0);
+ details = details.set_index(enumeration_index);
+ dictionary->SetEntry(dictionary_entry(), *name(), *value, details);
+ property_details_ = details;
+ }
}
state_ = DATA;
}
@@ -641,18 +652,35 @@ void LookupIterator::ApplyTransitionToDataProperty(
property_details_ = transition->GetLastDescriptorDetails(isolate_);
state_ = DATA;
} else if (receiver->map(isolate_).is_dictionary_map()) {
- Handle<NameDictionary> dictionary(receiver->property_dictionary(isolate_),
- isolate_);
if (receiver->map(isolate_).is_prototype_map() &&
receiver->IsJSObject(isolate_)) {
JSObject::InvalidatePrototypeChains(receiver->map(isolate_));
}
- dictionary = NameDictionary::Add(isolate(), dictionary, name(),
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ Handle<OrderedNameDictionary> dictionary(
+ receiver->property_dictionary_ordered(isolate_), isolate_);
+
+ dictionary =
+ OrderedNameDictionary::Add(isolate(), dictionary, name(),
isolate_->factory()->uninitialized_value(),
- property_details_, &number_);
- receiver->SetProperties(*dictionary);
- // Reload details containing proper enumeration index value.
- property_details_ = dictionary->DetailsAt(number_);
+ property_details_)
+ .ToHandleChecked();
+
+ // set to last used entry
+ number_ = InternalIndex(dictionary->UsedCapacity() - 1);
+ receiver->SetProperties(*dictionary);
+ } else {
+ Handle<NameDictionary> dictionary(receiver->property_dictionary(isolate_),
+ isolate_);
+
+ dictionary =
+ NameDictionary::Add(isolate(), dictionary, name(),
+ isolate_->factory()->uninitialized_value(),
+ property_details_, &number_);
+ receiver->SetProperties(*dictionary);
+ // Reload details containing proper enumeration index value.
+ property_details_ = dictionary->DetailsAt(number_);
+ }
has_property_ = true;
state_ = DATA;
@@ -837,8 +865,13 @@ Handle<Object> LookupIterator::FetchValue(
result = holder->global_dictionary(isolate_).ValueAt(isolate_,
dictionary_entry());
} else if (!holder_->HasFastProperties(isolate_)) {
- result = holder_->property_dictionary(isolate_).ValueAt(isolate_,
- dictionary_entry());
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ result = holder_->property_dictionary_ordered(isolate_).ValueAt(
+ dictionary_entry());
+ } else {
+ result = holder_->property_dictionary(isolate_).ValueAt(
+ isolate_, dictionary_entry());
+ }
} else if (property_details_.location() == kField) {
DCHECK_EQ(kData, property_details_.kind());
Handle<JSObject> holder = GetHolder<JSObject>();
@@ -851,9 +884,9 @@ Handle<Object> LookupIterator::FetchValue(
return JSObject::FastPropertyAt(holder, property_details_.representation(),
field_index);
} else {
- result =
- holder_->map(isolate_).instance_descriptors(isolate_).GetStrongValue(
- isolate_, descriptor_number());
+ result = holder_->map(isolate_)
+ .instance_descriptors(isolate_, kRelaxedLoad)
+ .GetStrongValue(isolate_, descriptor_number());
}
return handle(result, isolate_);
}
@@ -941,10 +974,10 @@ Handle<FieldType> LookupIterator::GetFieldType() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(kField, property_details_.location());
- return handle(
- holder_->map(isolate_).instance_descriptors(isolate_).GetFieldType(
- isolate_, descriptor_number()),
- isolate_);
+ return handle(holder_->map(isolate_)
+ .instance_descriptors(isolate_, kRelaxedLoad)
+ .GetFieldType(isolate_, descriptor_number()),
+ isolate_);
}
Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
@@ -994,8 +1027,14 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
dictionary.CellAt(isolate_, dictionary_entry()).set_value(*value);
} else {
DCHECK_IMPLIES(holder->IsJSProxy(isolate_), name()->IsPrivate(isolate_));
- NameDictionary dictionary = holder->property_dictionary(isolate_);
- dictionary.ValueAtPut(dictionary_entry(), *value);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ OrderedNameDictionary dictionary =
+ holder->property_dictionary_ordered(isolate_);
+ dictionary.ValueAtPut(dictionary_entry(), *value);
+ } else {
+ NameDictionary dictionary = holder->property_dictionary(isolate_);
+ dictionary.ValueAtPut(dictionary_entry(), *value);
+ }
}
}
@@ -1131,16 +1170,24 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
property_details_ = property_details_.CopyAddAttributes(SEALED);
}
} else if (!map.is_dictionary_map()) {
- DescriptorArray descriptors = map.instance_descriptors(isolate_);
+ DescriptorArray descriptors =
+ map.instance_descriptors(isolate_, kRelaxedLoad);
number_ = descriptors.SearchWithCache(isolate_, *name_, map);
if (number_.is_not_found()) return NotFound(holder);
property_details_ = descriptors.GetDetails(number_);
} else {
DCHECK_IMPLIES(holder.IsJSProxy(isolate_), name()->IsPrivate(isolate_));
- NameDictionary dict = holder.property_dictionary(isolate_);
- number_ = dict.FindEntry(isolate(), name_);
- if (number_.is_not_found()) return NotFound(holder);
- property_details_ = dict.DetailsAt(number_);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ OrderedNameDictionary dict = holder.property_dictionary_ordered(isolate_);
+ number_ = dict.FindEntry(isolate(), *name_);
+ if (number_.is_not_found()) return NotFound(holder);
+ property_details_ = dict.DetailsAt(number_);
+ } else {
+ NameDictionary dict = holder.property_dictionary(isolate_);
+ number_ = dict.FindEntry(isolate(), name_);
+ if (number_.is_not_found()) return NotFound(holder);
+ property_details_ = dict.DetailsAt(number_);
+ }
}
has_property_ = true;
switch (property_details_.kind()) {
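
In the ApplyTransitionToDataProperty hunk above, the ordered-dictionary path recovers the new entry's index as the last used slot ("UsedCapacity() - 1") rather than through NameDictionary::Add's out-parameter. A loose sketch of that bookkeeping, with a plain vector of pairs as a hypothetical insertion-ordered backend; V8's OrderedNameDictionary is a hash table, but only the append-at-the-end behaviour matters for the index:

#include <cassert>
#include <string>
#include <utility>
#include <vector>

// Hypothetical insertion-ordered dictionary used only to illustrate the index math.
using OrderedDict = std::vector<std::pair<std::string, int>>;

// Add() appends, so the new entry's index is simply the number of used entries
// minus one; no out-parameter is needed, unlike the unordered NameDictionary path.
size_t AddAndGetEntry(OrderedDict* dict, std::string name, int value) {
  dict->emplace_back(std::move(name), value);
  return dict->size() - 1;
}

int main() {
  OrderedDict dict;
  AddAndGetEntry(&dict, "x", 1);
  size_t entry = AddAndGetEntry(&dict, "y", 2);
  assert(entry == 1);
  assert(dict[entry].first == "y");
}
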
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 01beb50652..9529ea234c 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -20,6 +20,7 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/templates-inl.h"
#include "src/objects/transitions-inl.h"
+#include "src/objects/transitions.h"
#include "src/wasm/wasm-objects-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -28,24 +29,23 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/map-tq-inl.inc"
+
OBJECT_CONSTRUCTORS_IMPL(Map, HeapObject)
CAST_ACCESSOR(Map)
-DEF_GETTER(Map, instance_descriptors, DescriptorArray) {
- return TaggedField<DescriptorArray, kInstanceDescriptorsOffset>::load(isolate,
- *this);
-}
-
-SYNCHRONIZED_ACCESSORS(Map, synchronized_instance_descriptors, DescriptorArray,
- kInstanceDescriptorsOffset)
+RELAXED_ACCESSORS(Map, instance_descriptors, DescriptorArray,
+ kInstanceDescriptorsOffset)
+RELEASE_ACQUIRE_ACCESSORS(Map, instance_descriptors, DescriptorArray,
+ kInstanceDescriptorsOffset)
// A freshly allocated layout descriptor can be set on an existing map.
// We need to use release-store and acquire-load accessor pairs to ensure
// that the concurrent marking thread observes initializing stores of the
// layout descriptor.
-SYNCHRONIZED_ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
- kLayoutDescriptorOffset,
- FLAG_unbox_double_fields)
+RELEASE_ACQUIRE_ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
+ kLayoutDescriptorOffset,
+ FLAG_unbox_double_fields)
SYNCHRONIZED_WEAK_ACCESSORS(Map, raw_transitions,
kTransitionsOrPrototypeInfoOffset)
@@ -157,21 +157,22 @@ bool Map::EquivalentToForNormalization(const Map other,
}
bool Map::IsUnboxedDoubleField(FieldIndex index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return IsUnboxedDoubleField(isolate, index);
}
-bool Map::IsUnboxedDoubleField(const Isolate* isolate, FieldIndex index) const {
+bool Map::IsUnboxedDoubleField(IsolateRoot isolate, FieldIndex index) const {
if (!FLAG_unbox_double_fields) return false;
if (!index.is_inobject()) return false;
- return !layout_descriptor(isolate).IsTagged(index.property_index());
+ return !layout_descriptor(isolate, kAcquireLoad)
+ .IsTagged(index.property_index());
}
bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
if (UnusedPropertyFields() != 0) return false;
if (is_prototype_map()) return false;
if (store_origin == StoreOrigin::kNamed) {
- int limit = Max(kMaxFastProperties, GetInObjectProperties());
+ int limit = std::max({kMaxFastProperties, GetInObjectProperties()});
FieldCounts counts = GetFieldCounts();
// Only count mutable fields so that objects with large numbers of
// constant functions do not go to dictionary mode. That would be bad
@@ -179,14 +180,14 @@ bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
int external = counts.mutable_count() - GetInObjectProperties();
return external > limit || counts.GetTotal() > kMaxNumberOfDescriptors;
} else {
- int limit = Max(kFastPropertiesSoftLimit, GetInObjectProperties());
+ int limit = std::max({kFastPropertiesSoftLimit, GetInObjectProperties()});
int external = NumberOfFields() - GetInObjectProperties();
return external > limit;
}
}
PropertyDetails Map::GetLastDescriptorDetails(Isolate* isolate) const {
- return instance_descriptors(isolate).GetDetails(LastAdded());
+ return instance_descriptors(isolate, kRelaxedLoad).GetDetails(LastAdded());
}
InternalIndex Map::LastAdded() const {
@@ -200,7 +201,7 @@ int Map::NumberOfOwnDescriptors() const {
}
void Map::SetNumberOfOwnDescriptors(int number) {
- DCHECK_LE(number, instance_descriptors().number_of_descriptors());
+ DCHECK_LE(number, instance_descriptors(kRelaxedLoad).number_of_descriptors());
CHECK_LE(static_cast<unsigned>(number),
static_cast<unsigned>(kMaxNumberOfDescriptors));
set_bit_field3(
@@ -563,7 +564,7 @@ bool Map::is_stable() const {
bool Map::CanBeDeprecated() const {
for (InternalIndex i : IterateOwnDescriptors()) {
- PropertyDetails details = instance_descriptors().GetDetails(i);
+ PropertyDetails details = instance_descriptors(kRelaxedLoad).GetDetails(i);
if (details.representation().IsNone()) return true;
if (details.representation().IsSmi()) return true;
if (details.representation().IsDouble() && FLAG_unbox_double_fields)
@@ -633,17 +634,17 @@ void Map::UpdateDescriptors(Isolate* isolate, DescriptorArray descriptors,
int number_of_own_descriptors) {
SetInstanceDescriptors(isolate, descriptors, number_of_own_descriptors);
if (FLAG_unbox_double_fields) {
- if (layout_descriptor().IsSlowLayout()) {
- set_layout_descriptor(layout_desc);
+ if (layout_descriptor(kAcquireLoad).IsSlowLayout()) {
+ set_layout_descriptor(layout_desc, kReleaseStore);
}
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(layout_descriptor().IsConsistentWithMap(*this));
+ CHECK(layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
CHECK_EQ(Map::GetVisitorId(*this), visitor_id());
}
#else
- SLOW_DCHECK(layout_descriptor().IsConsistentWithMap(*this));
+ SLOW_DCHECK(layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
DCHECK(visitor_id() == Map::GetVisitorId(*this));
#endif
}
@@ -655,14 +656,14 @@ void Map::InitializeDescriptors(Isolate* isolate, DescriptorArray descriptors,
descriptors.number_of_descriptors());
if (FLAG_unbox_double_fields) {
- set_layout_descriptor(layout_desc);
+ set_layout_descriptor(layout_desc, kReleaseStore);
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(layout_descriptor().IsConsistentWithMap(*this));
+ CHECK(layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
}
#else
- SLOW_DCHECK(layout_descriptor().IsConsistentWithMap(*this));
+ SLOW_DCHECK(layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
#endif
set_visitor_id(Map::GetVisitorId(*this));
}
@@ -684,12 +685,12 @@ void Map::clear_padding() {
}
LayoutDescriptor Map::GetLayoutDescriptor() const {
- return FLAG_unbox_double_fields ? layout_descriptor()
+ return FLAG_unbox_double_fields ? layout_descriptor(kAcquireLoad)
: LayoutDescriptor::FastPointerLayout();
}
void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
- DescriptorArray descriptors = instance_descriptors();
+ DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
int number_of_own_descriptors = NumberOfOwnDescriptors();
DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
{
@@ -832,7 +833,7 @@ int Map::SlackForArraySize(int old_size, int size_limit) {
DCHECK_LE(1, max_slack);
return 1;
}
- return Min(max_slack, old_size / 4);
+ return std::min(max_slack, old_size / 4);
}
int Map::InstanceSizeFromSlack(int slack) const {
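
The map-inl.h hunks above move instance_descriptors and layout_descriptor onto tag-dispatched accessors (kRelaxedLoad, kAcquireLoad, kReleaseStore) generated by the RELAXED_ACCESSORS and RELEASE_ACQUIRE_ACCESSORS macros. A rough stand-in for that pattern built on std::atomic; the tag types, class, and field below are simplified assumptions for illustration, not the macro-generated V8 accessors:

#include <atomic>
#include <cassert>

// Hypothetical tag types selecting the memory order at the call site.
struct RelaxedLoadTag {};
struct AcquireLoadTag {};
struct ReleaseStoreTag {};
constexpr RelaxedLoadTag kRelaxedLoad{};
constexpr AcquireLoadTag kAcquireLoad{};
constexpr ReleaseStoreTag kReleaseStore{};

class MapLike {
 public:
  int descriptors(RelaxedLoadTag) const {
    return descriptors_.load(std::memory_order_relaxed);
  }
  int descriptors(AcquireLoadTag) const {
    return descriptors_.load(std::memory_order_acquire);
  }
  void set_descriptors(int value, ReleaseStoreTag) {
    descriptors_.store(value, std::memory_order_release);
  }

 private:
  std::atomic<int> descriptors_{0};
};

int main() {
  MapLike m;
  m.set_descriptors(7, kReleaseStore);        // publishing side uses release
  assert(m.descriptors(kAcquireLoad) == 7);   // reader that needs the ordering
  assert(m.descriptors(kRelaxedLoad) == 7);   // reader that only needs the value
}
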
diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc
index b4b1587493..36d5da85e8 100644
--- a/deps/v8/src/objects/map-updater.cc
+++ b/deps/v8/src/objects/map-updater.cc
@@ -28,7 +28,7 @@ inline bool EqualImmutableValues(Object obj1, Object obj2) {
MapUpdater::MapUpdater(Isolate* isolate, Handle<Map> old_map)
: isolate_(isolate),
old_map_(old_map),
- old_descriptors_(old_map->instance_descriptors(), isolate_),
+ old_descriptors_(old_map->instance_descriptors(kRelaxedLoad), isolate_),
old_nof_(old_map_->NumberOfOwnDescriptors()),
new_elements_kind_(old_map_->elements_kind()),
is_transitionable_fast_elements_kind_(
@@ -197,8 +197,9 @@ void MapUpdater::GeneralizeField(Handle<Map> map, InternalIndex modify_index,
Map::GeneralizeField(isolate_, map, modify_index, new_constness,
new_representation, new_field_type);
- DCHECK(*old_descriptors_ == old_map_->instance_descriptors() ||
- *old_descriptors_ == integrity_source_map_->instance_descriptors());
+ DCHECK(*old_descriptors_ == old_map_->instance_descriptors(kRelaxedLoad) ||
+ *old_descriptors_ ==
+ integrity_source_map_->instance_descriptors(kRelaxedLoad));
}
MapUpdater::State MapUpdater::Normalize(const char* reason) {
@@ -284,8 +285,8 @@ bool MapUpdater::TrySaveIntegrityLevelTransitions() {
integrity_source_map_->NumberOfOwnDescriptors());
has_integrity_level_transition_ = true;
- old_descriptors_ =
- handle(integrity_source_map_->instance_descriptors(), isolate_);
+ old_descriptors_ = handle(
+ integrity_source_map_->instance_descriptors(kRelaxedLoad), isolate_);
return true;
}
@@ -380,8 +381,8 @@ MapUpdater::State MapUpdater::FindTargetMap() {
if (transition.is_null()) break;
Handle<Map> tmp_map(transition, isolate_);
- Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
- isolate_);
+ Handle<DescriptorArray> tmp_descriptors(
+ tmp_map->instance_descriptors(kRelaxedLoad), isolate_);
// Check if target map is incompatible.
PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
@@ -428,7 +429,8 @@ MapUpdater::State MapUpdater::FindTargetMap() {
if (target_nof == old_nof_) {
#ifdef DEBUG
if (modified_descriptor_.is_found()) {
- DescriptorArray target_descriptors = target_map_->instance_descriptors();
+ DescriptorArray target_descriptors =
+ target_map_->instance_descriptors(kRelaxedLoad);
PropertyDetails details =
target_descriptors.GetDetails(modified_descriptor_);
DCHECK_EQ(new_kind_, details.kind());
@@ -476,8 +478,8 @@ MapUpdater::State MapUpdater::FindTargetMap() {
old_details.attributes());
if (transition.is_null()) break;
Handle<Map> tmp_map(transition, isolate_);
- Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
- isolate_);
+ Handle<DescriptorArray> tmp_descriptors(
+ tmp_map->instance_descriptors(kRelaxedLoad), isolate_);
#ifdef DEBUG
// Check that target map is compatible.
PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
@@ -501,7 +503,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
InstanceType instance_type = old_map_->instance_type();
int target_nof = target_map_->NumberOfOwnDescriptors();
Handle<DescriptorArray> target_descriptors(
- target_map_->instance_descriptors(), isolate_);
+ target_map_->instance_descriptors(kRelaxedLoad), isolate_);
// Allocate a new descriptor array large enough to hold the required
// descriptors, with minimally the exact same size as the old descriptor
@@ -676,7 +678,7 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
TransitionsAccessor(isolate_, current, &no_allocation)
.SearchTransition(name, details.kind(), details.attributes());
if (next.is_null()) break;
- DescriptorArray next_descriptors = next.instance_descriptors();
+ DescriptorArray next_descriptors = next.instance_descriptors(kRelaxedLoad);
PropertyDetails next_details = next_descriptors.GetDetails(i);
DCHECK_EQ(details.kind(), next_details.kind());
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index d1370aeaf4..535ec82d63 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -12,6 +12,7 @@
#include "src/init/bootstrapper.h"
#include "src/logging/counters-inl.h"
#include "src/logging/log.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/elements-kind.h"
#include "src/objects/field-type.h"
@@ -25,8 +26,6 @@
#include "src/roots/roots.h"
#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions.h"
#include "torque-generated/field-offsets.h"
namespace v8 {
@@ -66,7 +65,7 @@ void Map::PrintReconfiguration(Isolate* isolate, FILE* file,
PropertyAttributes attributes) {
OFStream os(file);
os << "[reconfiguring]";
- Name name = instance_descriptors().GetKey(modify_index);
+ Name name = instance_descriptors(kRelaxedLoad).GetKey(modify_index);
if (name.IsString()) {
String::cast(name).PrintOn(file);
} else {
@@ -188,9 +187,6 @@ VisitorId Map::GetVisitorId(Map map) {
case FEEDBACK_METADATA_TYPE:
return kVisitFeedbackMetadata;
- case ODDBALL_TYPE:
- return kVisitOddball;
-
case MAP_TYPE:
return kVisitMap;
@@ -203,9 +199,6 @@ VisitorId Map::GetVisitorId(Map map) {
case PROPERTY_CELL_TYPE:
return kVisitPropertyCell;
- case DESCRIPTOR_ARRAY_TYPE:
- return kVisitDescriptorArray;
-
case TRANSITION_ARRAY_TYPE:
return kVisitTransitionArray;
@@ -389,7 +382,7 @@ void Map::PrintGeneralization(
MaybeHandle<Object> new_value) {
OFStream os(file);
os << "[generalizing]";
- Name name = instance_descriptors().GetKey(modify_index);
+ Name name = instance_descriptors(kRelaxedLoad).GetKey(modify_index);
if (name.IsString()) {
String::cast(name).PrintOn(file);
} else {
@@ -450,7 +443,7 @@ MaybeHandle<Map> Map::CopyWithField(Isolate* isolate, Handle<Map> map,
PropertyConstness constness,
Representation representation,
TransitionFlag flag) {
- DCHECK(map->instance_descriptors()
+ DCHECK(map->instance_descriptors(kRelaxedLoad)
.Search(*name, map->NumberOfOwnDescriptors())
.is_not_found());
@@ -509,7 +502,7 @@ bool Map::TransitionRemovesTaggedField(Map target) const {
bool Map::TransitionChangesTaggedFieldToUntaggedField(Map target) const {
int inobject = NumberOfFields();
int target_inobject = target.NumberOfFields();
- int limit = Min(inobject, target_inobject);
+ int limit = std::min(inobject, target_inobject);
for (int i = 0; i < limit; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(target, i);
if (!IsUnboxedDoubleField(index) && target.IsUnboxedDoubleField(index)) {
@@ -544,8 +537,8 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
if (target_number_of_fields != *old_number_of_fields) return true;
// If smi descriptors were replaced by double descriptors, rewrite.
- DescriptorArray old_desc = instance_descriptors();
- DescriptorArray new_desc = target.instance_descriptors();
+ DescriptorArray old_desc = instance_descriptors(kRelaxedLoad);
+ DescriptorArray new_desc = target.instance_descriptors(kRelaxedLoad);
for (InternalIndex i : IterateOwnDescriptors()) {
if (new_desc.GetDetails(i).representation().IsDouble() !=
old_desc.GetDetails(i).representation().IsDouble()) {
@@ -569,7 +562,7 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
}
int Map::NumberOfFields() const {
- DescriptorArray descriptors = instance_descriptors();
+ DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
int result = 0;
for (InternalIndex i : IterateOwnDescriptors()) {
if (descriptors.GetDetails(i).location() == kField) result++;
@@ -578,7 +571,7 @@ int Map::NumberOfFields() const {
}
Map::FieldCounts Map::GetFieldCounts() const {
- DescriptorArray descriptors = instance_descriptors();
+ DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
int mutable_count = 0;
int const_count = 0;
for (InternalIndex i : IterateOwnDescriptors()) {
@@ -630,7 +623,7 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
return;
}
- DescriptorArray to_replace = instance_descriptors();
+ DescriptorArray to_replace = instance_descriptors(kRelaxedLoad);
// Replace descriptors by new_descriptors in all maps that share it. The old
// descriptors will not be trimmed in the mark-compactor, we need to mark
// all its elements.
@@ -638,7 +631,7 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
#ifndef V8_DISABLE_WRITE_BARRIERS
WriteBarrier::Marking(to_replace, to_replace.number_of_descriptors());
#endif
- while (current.instance_descriptors(isolate) == to_replace) {
+ while (current.instance_descriptors(isolate, kRelaxedLoad) == to_replace) {
Object next = current.GetBackPointer(isolate);
if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
current.SetEnumLength(kInvalidEnumCacheSentinel);
@@ -656,8 +649,9 @@ Map Map::FindRootMap(Isolate* isolate) const {
if (back.IsUndefined(isolate)) {
// Initial map must not contain descriptors in the descriptors array
// that do not belong to the map.
- DCHECK_LE(result.NumberOfOwnDescriptors(),
- result.instance_descriptors().number_of_descriptors());
+ DCHECK_LE(
+ result.NumberOfOwnDescriptors(),
+ result.instance_descriptors(kRelaxedLoad).number_of_descriptors());
return result;
}
result = Map::cast(back);
@@ -666,8 +660,9 @@ Map Map::FindRootMap(Isolate* isolate) const {
Map Map::FindFieldOwner(Isolate* isolate, InternalIndex descriptor) const {
DisallowHeapAllocation no_allocation;
- DCHECK_EQ(kField,
- instance_descriptors(isolate).GetDetails(descriptor).location());
+ DCHECK_EQ(kField, instance_descriptors(isolate, kRelaxedLoad)
+ .GetDetails(descriptor)
+ .location());
Map result = *this;
while (true) {
Object back = result.GetBackPointer(isolate);
@@ -686,7 +681,8 @@ void Map::UpdateFieldType(Isolate* isolate, InternalIndex descriptor,
DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeak());
// We store raw pointers in the queue, so no allocations are allowed.
DisallowHeapAllocation no_allocation;
- PropertyDetails details = instance_descriptors().GetDetails(descriptor);
+ PropertyDetails details =
+ instance_descriptors(kRelaxedLoad).GetDetails(descriptor);
if (details.location() != kField) return;
DCHECK_EQ(kData, details.kind());
@@ -708,7 +704,7 @@ void Map::UpdateFieldType(Isolate* isolate, InternalIndex descriptor,
Map target = transitions.GetTarget(i);
backlog.push(target);
}
- DescriptorArray descriptors = current.instance_descriptors();
+ DescriptorArray descriptors = current.instance_descriptors(kRelaxedLoad);
PropertyDetails details = descriptors.GetDetails(descriptor);
// It is allowed to change representation here only from None
@@ -756,7 +752,8 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map,
Representation new_representation,
Handle<FieldType> new_field_type) {
// Check if we actually need to generalize the field type at all.
- Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> old_descriptors(
+ map->instance_descriptors(kRelaxedLoad), isolate);
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
PropertyConstness old_constness = old_details.constness();
Representation old_representation = old_details.representation();
@@ -779,8 +776,8 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map,
// Determine the field owner.
Handle<Map> field_owner(map->FindFieldOwner(isolate, modify_index), isolate);
- Handle<DescriptorArray> descriptors(field_owner->instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> descriptors(
+ field_owner->instance_descriptors(kRelaxedLoad), isolate);
DCHECK_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
new_field_type =
@@ -866,7 +863,7 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
// types instead of old_map's types.
// Go to slow map updating if the old_map has fast properties with cleared
// field types.
- DescriptorArray old_descriptors = old_map.instance_descriptors();
+ DescriptorArray old_descriptors = old_map.instance_descriptors(kRelaxedLoad);
for (InternalIndex i : old_map.IterateOwnDescriptors()) {
PropertyDetails old_details = old_descriptors.GetDetails(i);
if (old_details.location() == kField && old_details.kind() == kData) {
@@ -1029,7 +1026,7 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
int root_nof = NumberOfOwnDescriptors();
int old_nof = old_map.NumberOfOwnDescriptors();
- DescriptorArray old_descriptors = old_map.instance_descriptors();
+ DescriptorArray old_descriptors = old_map.instance_descriptors(kRelaxedLoad);
Map new_map = *this;
for (InternalIndex i : InternalIndex::Range(root_nof, old_nof)) {
@@ -1040,7 +1037,8 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
old_details.attributes());
if (transition.is_null()) return Map();
new_map = transition;
- DescriptorArray new_descriptors = new_map.instance_descriptors();
+ DescriptorArray new_descriptors =
+ new_map.instance_descriptors(kRelaxedLoad);
PropertyDetails new_details = new_descriptors.GetDetails(i);
DCHECK_EQ(old_details.kind(), new_details.kind());
@@ -1105,7 +1103,8 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
// Only supports adding slack to owned descriptors.
DCHECK(map->owns_descriptors());
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
int old_size = map->NumberOfOwnDescriptors();
if (slack <= descriptors->number_of_slack_descriptors()) return;
@@ -1137,7 +1136,7 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
#endif
Map current = *map;
- while (current.instance_descriptors() == *descriptors) {
+ while (current.instance_descriptors(kRelaxedLoad) == *descriptors) {
Object next = current.GetBackPointer();
if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
current.UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
@@ -1388,7 +1387,7 @@ Handle<Map> Map::AsElementsKind(Isolate* isolate, Handle<Map> map,
int Map::NumberOfEnumerableProperties() const {
int result = 0;
- DescriptorArray descs = instance_descriptors();
+ DescriptorArray descs = instance_descriptors(kRelaxedLoad);
for (InternalIndex i : IterateOwnDescriptors()) {
if ((descs.GetDetails(i).attributes() & ONLY_ENUMERABLE) == 0 &&
!descs.GetKey(i).FilterKey(ENUMERABLE_STRINGS)) {
@@ -1400,7 +1399,7 @@ int Map::NumberOfEnumerableProperties() const {
int Map::NextFreePropertyIndex() const {
int number_of_own_descriptors = NumberOfOwnDescriptors();
- DescriptorArray descs = instance_descriptors();
+ DescriptorArray descs = instance_descriptors(kRelaxedLoad);
// Search properties backwards to find the last field.
for (int i = number_of_own_descriptors - 1; i >= 0; --i) {
PropertyDetails details = descs.GetDetails(InternalIndex(i));
@@ -1587,18 +1586,20 @@ Handle<Map> Map::TransitionToImmutableProto(Isolate* isolate, Handle<Map> map) {
namespace {
void EnsureInitialMap(Isolate* isolate, Handle<Map> map) {
#ifdef DEBUG
- // Strict function maps have Function as a constructor but the
- // Function's initial map is a sloppy function map. Same holds for
- // GeneratorFunction / AsyncFunction and its initial map.
- Object constructor = map->GetConstructor();
- DCHECK(constructor.IsJSFunction());
- DCHECK(*map == JSFunction::cast(constructor).initial_map() ||
+ Object maybe_constructor = map->GetConstructor();
+ DCHECK((maybe_constructor.IsJSFunction() &&
+ *map == JSFunction::cast(maybe_constructor).initial_map()) ||
+ // Below are the exceptions to the check above.
+ // Strict function maps have Function as a constructor but the
+ // Function's initial map is a sloppy function map.
*map == *isolate->strict_function_map() ||
*map == *isolate->strict_function_with_name_map() ||
+ // Same holds for GeneratorFunction and its initial map.
*map == *isolate->generator_function_map() ||
*map == *isolate->generator_function_with_name_map() ||
*map == *isolate->generator_function_with_home_object_map() ||
*map == *isolate->generator_function_with_name_and_home_object_map() ||
+ // AsyncFunction has Null as a constructor.
*map == *isolate->async_function_map() ||
*map == *isolate->async_function_with_name_map() ||
*map == *isolate->async_function_with_home_object_map() ||
@@ -1607,7 +1608,7 @@ void EnsureInitialMap(Isolate* isolate, Handle<Map> map) {
// Initial maps must not contain descriptors in the descriptors array
// that do not belong to the map.
DCHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors().number_of_descriptors());
+ map->instance_descriptors(kRelaxedLoad).number_of_descriptors());
}
} // namespace
@@ -1623,10 +1624,6 @@ Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map,
int instance_size, int inobject_properties,
int unused_property_fields) {
EnsureInitialMap(isolate, map);
- // Initial map must not contain descriptors in the descriptors array
- // that do not belong to the map.
- DCHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors().number_of_descriptors());
Handle<Map> result =
RawCopy(isolate, map, instance_size, inobject_properties);
@@ -1637,7 +1634,7 @@ Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map,
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
if (number_of_own_descriptors > 0) {
// The copy will use the same descriptors array without ownership.
- DescriptorArray descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors(kRelaxedLoad);
result->set_owns_descriptors(false);
result->UpdateDescriptors(isolate, descriptors, map->GetLayoutDescriptor(),
number_of_own_descriptors);
@@ -1669,7 +1666,7 @@ Handle<Map> Map::ShareDescriptor(Isolate* isolate, Handle<Map> map,
// array, implying that its NumberOfOwnDescriptors equals the number of
// descriptors in the descriptor array.
DCHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors().number_of_descriptors());
+ map->instance_descriptors(kRelaxedLoad).number_of_descriptors());
Handle<Map> result = CopyDropDescriptors(isolate, map);
Handle<Name> name = descriptor->GetKey();
@@ -1687,7 +1684,7 @@ Handle<Map> Map::ShareDescriptor(Isolate* isolate, Handle<Map> map,
} else {
int slack = SlackForArraySize(old_size, kMaxNumberOfDescriptors);
EnsureDescriptorSlack(isolate, map, slack);
- descriptors = handle(map->instance_descriptors(), isolate);
+ descriptors = handle(map->instance_descriptors(kRelaxedLoad), isolate);
}
}
@@ -1721,8 +1718,9 @@ void Map::ConnectTransition(Isolate* isolate, Handle<Map> parent,
} else if (!parent->IsDetached(isolate)) {
// |parent| is initial map and it must not contain descriptors in the
// descriptors array that do not belong to the map.
- DCHECK_EQ(parent->NumberOfOwnDescriptors(),
- parent->instance_descriptors().number_of_descriptors());
+ DCHECK_EQ(
+ parent->NumberOfOwnDescriptors(),
+ parent->instance_descriptors(kRelaxedLoad).number_of_descriptors());
}
if (parent->IsDetached(isolate)) {
DCHECK(child->IsDetached(isolate));
@@ -1846,14 +1844,15 @@ void Map::InstallDescriptors(Isolate* isolate, Handle<Map> parent,
Handle<LayoutDescriptor> layout_descriptor =
LayoutDescriptor::AppendIfFastOrUseFull(isolate, parent, details,
full_layout_descriptor);
- child->set_layout_descriptor(*layout_descriptor);
+ child->set_layout_descriptor(*layout_descriptor, kReleaseStore);
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(child->layout_descriptor().IsConsistentWithMap(*child));
+ CHECK(child->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*child));
}
#else
- SLOW_DCHECK(child->layout_descriptor().IsConsistentWithMap(*child));
+ SLOW_DCHECK(
+ child->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*child));
#endif
child->set_visitor_id(Map::GetVisitorId(*child));
}
@@ -1959,12 +1958,14 @@ Handle<Map> Map::CopyForElementsTransition(Isolate* isolate, Handle<Map> map) {
// transfer ownership to the new map.
// The properties did not change, so reuse descriptors.
map->set_owns_descriptors(false);
- new_map->InitializeDescriptors(isolate, map->instance_descriptors(),
+ new_map->InitializeDescriptors(isolate,
+ map->instance_descriptors(kRelaxedLoad),
map->GetLayoutDescriptor());
} else {
// In case the map did not own its own descriptors, a split is forced by
// copying the map; creating a new descriptor array cell.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
isolate, descriptors, number_of_own_descriptors);
@@ -1977,7 +1978,8 @@ Handle<Map> Map::CopyForElementsTransition(Isolate* isolate, Handle<Map> map) {
}
Handle<Map> Map::Copy(Isolate* isolate, Handle<Map> map, const char* reason) {
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
isolate, descriptors, number_of_own_descriptors);
@@ -2018,8 +2020,8 @@ Handle<Map> Map::CopyForPreventExtensions(
bool old_map_is_dictionary_elements_kind) {
int num_descriptors = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> new_desc = DescriptorArray::CopyUpToAddAttributes(
- isolate, handle(map->instance_descriptors(), isolate), num_descriptors,
- attrs_to_add);
+ isolate, handle(map->instance_descriptors(kRelaxedLoad), isolate),
+ num_descriptors, attrs_to_add);
Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
isolate);
// Do not track transitions during bootstrapping.
@@ -2115,13 +2117,14 @@ Handle<Map> UpdateDescriptorForValue(Isolate* isolate, Handle<Map> map,
InternalIndex descriptor,
PropertyConstness constness,
Handle<Object> value) {
- if (CanHoldValue(map->instance_descriptors(), descriptor, constness,
- *value)) {
+ if (CanHoldValue(map->instance_descriptors(kRelaxedLoad), descriptor,
+ constness, *value)) {
return map;
}
- PropertyAttributes attributes =
- map->instance_descriptors().GetDetails(descriptor).attributes();
+ PropertyAttributes attributes = map->instance_descriptors(kRelaxedLoad)
+ .GetDetails(descriptor)
+ .attributes();
Representation representation = value->OptimalRepresentation(isolate);
Handle<FieldType> type = value->OptimalType(isolate, representation);
@@ -2168,9 +2171,9 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
Handle<Map> transition(maybe_transition, isolate);
InternalIndex descriptor = transition->LastAdded();
- DCHECK_EQ(
- attributes,
- transition->instance_descriptors().GetDetails(descriptor).attributes());
+ DCHECK_EQ(attributes, transition->instance_descriptors(kRelaxedLoad)
+ .GetDetails(descriptor)
+ .attributes());
return UpdateDescriptorForValue(isolate, transition, descriptor, constness,
value);
@@ -2288,7 +2291,8 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
.SearchTransition(*name, kAccessor, attributes);
if (!maybe_transition.is_null()) {
Handle<Map> transition(maybe_transition, isolate);
- DescriptorArray descriptors = transition->instance_descriptors();
+ DescriptorArray descriptors =
+ transition->instance_descriptors(kRelaxedLoad);
InternalIndex descriptor = transition->LastAdded();
DCHECK(descriptors.GetKey(descriptor).Equals(*name));
@@ -2311,7 +2315,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
}
Handle<AccessorPair> pair;
- DescriptorArray old_descriptors = map->instance_descriptors();
+ DescriptorArray old_descriptors = map->instance_descriptors(kRelaxedLoad);
if (descriptor.is_found()) {
if (descriptor != map->LastAdded()) {
return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonLast");
@@ -2372,7 +2376,8 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
Handle<Map> Map::CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
Descriptor* descriptor,
TransitionFlag flag) {
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
// Share descriptors only if map owns descriptors and it not an initial map.
if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
@@ -2399,7 +2404,8 @@ Handle<Map> Map::CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
Handle<Map> Map::CopyInsertDescriptor(Isolate* isolate, Handle<Map> map,
Descriptor* descriptor,
TransitionFlag flag) {
- Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> old_descriptors(
+ map->instance_descriptors(kRelaxedLoad), isolate);
// We replace the key if it is already present.
InternalIndex index =
@@ -2479,9 +2485,10 @@ bool Map::EquivalentToForTransition(const Map other) const {
if (instance_type() == JS_FUNCTION_TYPE) {
// JSFunctions require more checks to ensure that sloppy function is
// not equivalent to strict function.
- int nof = Min(NumberOfOwnDescriptors(), other.NumberOfOwnDescriptors());
- return instance_descriptors().IsEqualUpTo(other.instance_descriptors(),
- nof);
+ int nof =
+ std::min(NumberOfOwnDescriptors(), other.NumberOfOwnDescriptors());
+ return instance_descriptors(kRelaxedLoad)
+ .IsEqualUpTo(other.instance_descriptors(kRelaxedLoad), nof);
}
return true;
}
@@ -2492,7 +2499,7 @@ bool Map::EquivalentToForElementsKindTransition(const Map other) const {
// Ensure that we don't try to generate elements kind transitions from maps
// with fields that may be generalized in-place. This must already be handled
// during addition of a new field.
- DescriptorArray descriptors = instance_descriptors();
+ DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
if (details.location() == kField) {
@@ -2573,7 +2580,7 @@ void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
int number_of_own_descriptors) {
- set_synchronized_instance_descriptors(descriptors);
+ set_instance_descriptors(descriptors, kReleaseStore);
SetNumberOfOwnDescriptors(number_of_own_descriptors);
#ifndef V8_DISABLE_WRITE_BARRIERS
WriteBarrier::Marking(descriptors, number_of_own_descriptors);
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 007dd77d6e..f55b39acd2 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -20,6 +20,8 @@
namespace v8 {
namespace internal {
+class WasmTypeInfo;
+
enum InstanceType : uint16_t;
#define DATA_ONLY_VISITOR_ID_LIST(V) \
@@ -38,7 +40,6 @@ enum InstanceType : uint16_t;
V(CodeDataContainer) \
V(Context) \
V(DataHandler) \
- V(DescriptorArray) \
V(EmbedderDataArray) \
V(EphemeronHashTable) \
V(FeedbackCell) \
@@ -54,7 +55,6 @@ enum InstanceType : uint16_t;
V(JSWeakCollection) \
V(Map) \
V(NativeContext) \
- V(Oddball) \
V(PreparseData) \
V(PropertyArray) \
V(PropertyCell) \
@@ -71,7 +71,6 @@ enum InstanceType : uint16_t;
V(TransitionArray) \
V(UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData) \
- V(WasmCapiFunctionData) \
V(WasmIndirectFunctionTable) \
V(WasmInstanceObject) \
V(WasmArray) \
@@ -105,6 +104,8 @@ enum class ObjectFields {
using MapHandles = std::vector<Handle<Map>>;
+#include "torque-generated/src/objects/map-tq.inc"
+
// All heap objects have a Map that describes their structure.
// A Map contains information about:
// - Size information about the object
@@ -594,14 +595,14 @@ class Map : public HeapObject {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// [instance descriptors]: describes the object.
- DECL_GETTER(synchronized_instance_descriptors, DescriptorArray)
- DECL_GETTER(instance_descriptors, DescriptorArray)
+ DECL_RELAXED_ACCESSORS(instance_descriptors, DescriptorArray)
+ DECL_ACQUIRE_GETTER(instance_descriptors, DescriptorArray)
V8_EXPORT_PRIVATE void SetInstanceDescriptors(Isolate* isolate,
DescriptorArray descriptors,
int number_of_own_descriptors);
// [layout descriptor]: describes the object layout.
- DECL_ACCESSORS(layout_descriptor, LayoutDescriptor)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(layout_descriptor, LayoutDescriptor)
// |layout descriptor| accessor which can be used from GC.
inline LayoutDescriptor layout_descriptor_gc_safe() const;
inline bool HasFastPointerLayout() const;
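
The synchronized_ prefix gives way to tag arguments: callers now pass kRelaxedLoad, kAcquireLoad, or kReleaseStore to pick the accessor overload with the intended memory order, so the ordering is visible at the call site instead of being encoded in the accessor name. A minimal sketch of that tag-dispatch pattern, using hypothetical stand-in tag types rather than the real V8 definitions:

#include <cstdio>

// Hypothetical tag types standing in for V8's RelaxedLoadTag / AcquireLoadTag.
struct RelaxedLoadTag {};
struct AcquireLoadTag {};
inline constexpr RelaxedLoadTag kRelaxedLoad{};
inline constexpr AcquireLoadTag kAcquireLoad{};

class Holder {
 public:
  // The tag argument selects the overload; callers write value(kRelaxedLoad)
  // or value(kAcquireLoad) depending on the ordering they need.
  int value(RelaxedLoadTag) const {
    std::puts("relaxed load");
    return value_;
  }
  int value(AcquireLoadTag) const {
    std::puts("acquire load");
    return value_;
  }

 private:
  int value_ = 7;
};

int main() {
  Holder h;
  h.value(kRelaxedLoad);
  h.value(kAcquireLoad);
}
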
@@ -862,8 +863,7 @@ class Map : public HeapObject {
// Returns true if given field is unboxed double.
inline bool IsUnboxedDoubleField(FieldIndex index) const;
- inline bool IsUnboxedDoubleField(const Isolate* isolate,
- FieldIndex index) const;
+ inline bool IsUnboxedDoubleField(IsolateRoot isolate, FieldIndex index) const;
void PrintMapDetails(std::ostream& os);
@@ -977,8 +977,7 @@ class Map : public HeapObject {
MaybeHandle<Object> new_value);
// Use the high-level instance_descriptors/SetInstanceDescriptors instead.
- inline void set_synchronized_instance_descriptors(
- DescriptorArray value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ DECL_RELEASE_SETTER(instance_descriptors, DescriptorArray)
static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 128;
@@ -1007,7 +1006,7 @@ class NormalizedMapCache : public WeakFixedArray {
DECL_VERIFIER(NormalizedMapCache)
private:
- friend bool HeapObject::IsNormalizedMapCache(const Isolate* isolate) const;
+ friend bool HeapObject::IsNormalizedMapCache(IsolateRoot isolate) const;
static const int kEntries = 64;
diff --git a/deps/v8/src/objects/maybe-object-inl.h b/deps/v8/src/objects/maybe-object-inl.h
index afb3a93123..6cabc52312 100644
--- a/deps/v8/src/objects/maybe-object-inl.h
+++ b/deps/v8/src/objects/maybe-object-inl.h
@@ -34,6 +34,15 @@ MaybeObject MaybeObject::MakeWeak(MaybeObject object) {
return MaybeObject(object.ptr() | kWeakHeapObjectMask);
}
+// static
+MaybeObject MaybeObject::Create(MaybeObject o) { return o; }
+
+// static
+MaybeObject MaybeObject::Create(Object o) { return FromObject(o); }
+
+// static
+MaybeObject MaybeObject::Create(Smi smi) { return FromSmi(smi); }
+
//
// HeapObjectReference implementation.
//
@@ -69,7 +78,7 @@ HeapObjectReference HeapObjectReference::From(Object object,
}
// static
-HeapObjectReference HeapObjectReference::ClearedValue(const Isolate* isolate) {
+HeapObjectReference HeapObjectReference::ClearedValue(IsolateRoot isolate) {
// Construct cleared weak ref value.
#ifdef V8_COMPRESS_POINTERS
// This is necessary to make pointer decompression computation also
diff --git a/deps/v8/src/objects/maybe-object.h b/deps/v8/src/objects/maybe-object.h
index fd1363498e..3fe69ee5ec 100644
--- a/deps/v8/src/objects/maybe-object.h
+++ b/deps/v8/src/objects/maybe-object.h
@@ -27,6 +27,10 @@ class MaybeObject : public TaggedImpl<HeapObjectReferenceType::WEAK, Address> {
V8_INLINE static MaybeObject MakeWeak(MaybeObject object);
+ V8_INLINE static MaybeObject Create(MaybeObject o);
+ V8_INLINE static MaybeObject Create(Object o);
+ V8_INLINE static MaybeObject Create(Smi smi);
+
#ifdef VERIFY_HEAP
static void VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject p);
#endif
@@ -50,7 +54,7 @@ class HeapObjectReference : public MaybeObject {
V8_INLINE static HeapObjectReference From(Object object,
HeapObjectReferenceType type);
- V8_INLINE static HeapObjectReference ClearedValue(const Isolate* isolate);
+ V8_INLINE static HeapObjectReference ClearedValue(IsolateRoot isolate);
template <typename THeapObjectSlot>
V8_INLINE static void Update(THeapObjectSlot slot, HeapObject value);
diff --git a/deps/v8/src/objects/microtask-inl.h b/deps/v8/src/objects/microtask-inl.h
index 613ee096c5..c9432817e5 100644
--- a/deps/v8/src/objects/microtask-inl.h
+++ b/deps/v8/src/objects/microtask-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/microtask-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(Microtask)
TQ_OBJECT_CONSTRUCTORS_IMPL(CallbackTask)
TQ_OBJECT_CONSTRUCTORS_IMPL(CallableTask)
diff --git a/deps/v8/src/objects/microtask.h b/deps/v8/src/objects/microtask.h
index cd8a71f58c..f2869eadc7 100644
--- a/deps/v8/src/objects/microtask.h
+++ b/deps/v8/src/objects/microtask.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/microtask-tq.inc"
+
// Abstract base class for all microtasks that can be scheduled on the
// microtask queue. This class merely serves the purpose of a marker
// interface.
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index e627aedf18..c72cf2ad0c 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -6,12 +6,12 @@
#define V8_OBJECTS_MODULE_INL_H_
#include "src/objects/module.h"
-#include "src/objects/source-text-module.h"
-#include "src/objects/synthetic-module.h"
-
#include "src/objects/objects-inl.h" // Needed for write barriers
#include "src/objects/scope-info.h"
+#include "src/objects/source-text-module-inl.h"
+#include "src/objects/source-text-module.h"
#include "src/objects/string-inl.h"
+#include "src/objects/synthetic-module.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -19,13 +19,13 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/module-tq-inl.inc"
+
OBJECT_CONSTRUCTORS_IMPL(Module, HeapObject)
-TQ_OBJECT_CONSTRUCTORS_IMPL(SourceTextModule)
-TQ_OBJECT_CONSTRUCTORS_IMPL(SourceTextModuleInfoEntry)
-TQ_OBJECT_CONSTRUCTORS_IMPL(SyntheticModule)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSModuleNamespace)
NEVER_READ_ONLY_SPACE_IMPL(Module)
+NEVER_READ_ONLY_SPACE_IMPL(ModuleRequest)
NEVER_READ_ONLY_SPACE_IMPL(SourceTextModule)
NEVER_READ_ONLY_SPACE_IMPL(SyntheticModule)
@@ -44,6 +44,12 @@ ACCESSORS(SourceTextModule, async_parent_modules, ArrayList,
ACCESSORS(SourceTextModule, top_level_capability, HeapObject,
kTopLevelCapabilityOffset)
+struct Module::Hash {
+ V8_INLINE size_t operator()(Module const& module) const {
+ return module.hash();
+ }
+};
+
SourceTextModuleInfo SourceTextModule::info() const {
return status() == kErrored
? SourceTextModuleInfo::cast(code())
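
The new Module::Hash functor simply forwards to Module::hash(), which is what lets Module values key the ZoneUnorderedSet built by IsGraphAsync in module.cc below. A small sketch of the same idea against std::unordered_set, with a hypothetical Module stand-in rather than the V8 type:

#include <cstddef>
#include <cstdio>
#include <unordered_set>

// Hypothetical stand-in for the real Module type.
struct Module {
  int id;
  size_t hash() const { return static_cast<size_t>(id) * 2654435761u; }
  bool operator==(const Module& other) const { return id == other.id; }

  // Mirrors the functor added above: forwards to Module::hash().
  struct Hash {
    size_t operator()(const Module& module) const { return module.hash(); }
  };
};

int main() {
  std::unordered_set<Module, Module::Hash> visited;
  visited.insert(Module{1});
  visited.insert(Module{1});  // duplicate key, ignored
  visited.insert(Module{2});
  std::printf("%zu\n", visited.size());  // prints 2
}
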
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index e35870e953..f4c23ae5c4 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/objects/module.h"
+
#include <unordered_map>
#include <unordered_set>
-#include "src/objects/module.h"
-
#include "src/api/api-inl.h"
#include "src/ast/modules.h"
#include "src/builtins/accessors.h"
@@ -16,6 +16,7 @@
#include "src/objects/js-generator-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/synthetic-module-inl.h"
#include "src/utils/ostreams.h"
namespace v8 {
@@ -371,5 +372,38 @@ Maybe<PropertyAttributes> JSModuleNamespace::GetPropertyAttributes(
return Just(it->property_attributes());
}
+bool Module::IsGraphAsync(Isolate* isolate) const {
+ DisallowGarbageCollection no_gc;
+
+ // Only SourceTextModules may be async.
+ if (!IsSourceTextModule()) return false;
+ SourceTextModule root = SourceTextModule::cast(*this);
+
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ const size_t bucket_count = 2;
+ ZoneUnorderedSet<Module, Module::Hash> visited(&zone, bucket_count);
+ ZoneVector<SourceTextModule> worklist(&zone);
+ visited.insert(root);
+ worklist.push_back(root);
+
+ do {
+ SourceTextModule current = worklist.back();
+ worklist.pop_back();
+ DCHECK_GE(current.status(), kInstantiated);
+
+ if (current.async()) return true;
+ FixedArray requested_modules = current.requested_modules();
+ for (int i = 0, length = requested_modules.length(); i < length; ++i) {
+ Module descendant = Module::cast(requested_modules.get(i));
+ if (descendant.IsSourceTextModule()) {
+ const bool cycle = !visited.insert(descendant).second;
+ if (!cycle) worklist.push_back(SourceTextModule::cast(descendant));
+ }
+ }
+ } while (!worklist.empty());
+
+ return false;
+}
+
} // namespace internal
} // namespace v8
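
Module::IsGraphAsync is an iterative walk over the requested-module graph: a visited set guards against cycles, and the walk returns as soon as any reachable SourceTextModule is async. A standalone sketch of that worklist traversal, assuming a simplified module record (illustrative names only, not the V8 API):

#include <cstdio>
#include <unordered_set>
#include <vector>

// Simplified module record, for illustration only.
struct Mod {
  bool async = false;
  std::vector<const Mod*> requested;  // direct imports
};

// Returns true if `root` or anything it transitively requests is async.
bool IsGraphAsync(const Mod* root) {
  std::unordered_set<const Mod*> visited{root};
  std::vector<const Mod*> worklist{root};
  do {
    const Mod* current = worklist.back();
    worklist.pop_back();
    if (current->async) return true;  // early exit on the first async module
    for (const Mod* dep : current->requested) {
      // insert().second is false when `dep` was already visited (a cycle or
      // a diamond); only unvisited modules are pushed.
      if (visited.insert(dep).second) worklist.push_back(dep);
    }
  } while (!worklist.empty());
  return false;
}

int main() {
  Mod leaf{true, {}};
  Mod a{false, {&leaf}};
  Mod root{false, {&a}};
  a.requested.push_back(&root);              // cycle: a -> root -> a
  std::printf("%d\n", IsGraphAsync(&root));  // prints 1
}
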
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index f58454fac2..20be042f82 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -27,6 +27,8 @@ class SourceTextModuleInfoEntry;
class String;
class Zone;
+#include "torque-generated/src/objects/module-tq.inc"
+
// Module is the base class for ECMAScript module types, roughly corresponding
// to Abstract Module Record.
// https://tc39.github.io/ecma262/#sec-abstract-module-records
@@ -63,6 +65,10 @@ class Module : public HeapObject {
Object GetException();
DECL_ACCESSORS(exception, Object)
+  // Returns whether this module or any transitively requested module is [[Async]],

+ // i.e. has a top-level await.
+ V8_WARN_UNUSED_RESULT bool IsGraphAsync(Isolate* isolate) const;
+
// Implementation of spec operation ModuleDeclarationInstantiation.
// Returns false if an exception occurred during instantiation, true
// otherwise. (In the case where the callback throws an exception, that
@@ -87,6 +93,8 @@ class Module : public HeapObject {
using BodyDescriptor =
FixedBodyDescriptor<kExportsOffset, kHeaderSize, kHeaderSize>;
+ struct Hash;
+
protected:
friend class Factory;
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index ffcd287fd3..55f5915319 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/name-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(Name)
TQ_OBJECT_CONSTRUCTORS_IMPL(Symbol)
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index 264cb3698e..fc0927083e 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/name-tq.inc"
+
// The Name abstract class captures anything that can be used as a property
// name, i.e., strings and symbols. All names store a hash value.
class Name : public TorqueGeneratedName<Name, PrimitiveHeapObject> {
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index 9eef5c0dbf..a189f00d27 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -104,7 +104,6 @@ class ZoneForwardList;
V(DataHandler) \
V(DeoptimizationData) \
V(DependentCode) \
- V(DescriptorArray) \
V(EmbedderDataArray) \
V(EphemeronHashTable) \
V(ExternalOneByteString) \
@@ -181,7 +180,6 @@ class ZoneForwardList;
V(NumberWrapper) \
V(ObjectHashSet) \
V(ObjectHashTable) \
- V(Oddball) \
V(OrderedHashMap) \
V(OrderedHashSet) \
V(OrderedNameDictionary) \
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index b96c03c00f..82b4f36251 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -32,9 +32,9 @@
#undef ACCESSORS_CHECKED2
#undef ACCESSORS_CHECKED
#undef ACCESSORS
-#undef SYNCHRONIZED_ACCESSORS_CHECKED2
-#undef SYNCHRONIZED_ACCESSORS_CHECKED
-#undef SYNCHRONIZED_ACCESSORS
+#undef RELEASE_ACQUIRE_ACCESSORS_CHECKED2
+#undef RELEASE_ACQUIRE_ACCESSORS_CHECKED
+#undef RELEASE_ACQUIRE_ACCESSORS
#undef WEAK_ACCESSORS_CHECKED2
#undef WEAK_ACCESSORS_CHECKED
#undef WEAK_ACCESSORS
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index b4fc7717fe..3aa56bfbde 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -82,25 +82,47 @@
// parameter.
#define DECL_GETTER(name, type) \
inline type name() const; \
- inline type name(const Isolate* isolate) const;
+ inline type name(IsolateRoot isolate) const;
-#define DEF_GETTER(holder, name, type) \
- type holder::name() const { \
- const Isolate* isolate = GetIsolateForPtrCompr(*this); \
- return holder::name(isolate); \
- } \
- type holder::name(const Isolate* isolate) const
+#define DEF_GETTER(holder, name, type) \
+ type holder::name() const { \
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this); \
+ return holder::name(isolate); \
+ } \
+ type holder::name(IsolateRoot isolate) const
#define DECL_ACCESSORS(name, type) \
DECL_GETTER(name, type) \
inline void set_##name(type value, \
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-// TODO(solanes, neis): Unify naming for synchronized accessor uses.
-#define DECL_SYNCHRONIZED_ACCESSORS(name, type) \
- DECL_GETTER(synchronized_##name, type) \
- inline void set_synchronized_##name( \
- type value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+#define DECL_ACCESSORS_LOAD_TAG(name, type, tag_type) \
+ inline type name(tag_type tag) const; \
+ inline type name(IsolateRoot isolate, tag_type) const;
+
+#define DECL_ACCESSORS_STORE_TAG(name, type, tag_type) \
+ inline void set_##name(type value, tag_type, \
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+#define DECL_RELAXED_GETTER(name, type) \
+ DECL_ACCESSORS_LOAD_TAG(name, type, RelaxedLoadTag)
+
+#define DECL_RELAXED_SETTER(name, type) \
+ DECL_ACCESSORS_STORE_TAG(name, type, RelaxedStoreTag)
+
+#define DECL_RELAXED_ACCESSORS(name, type) \
+ DECL_RELAXED_GETTER(name, type) \
+ DECL_RELAXED_SETTER(name, type)
+
+#define DECL_ACQUIRE_GETTER(name, type) \
+ DECL_ACCESSORS_LOAD_TAG(name, type, AcquireLoadTag)
+
+#define DECL_RELEASE_SETTER(name, type) \
+ DECL_ACCESSORS_STORE_TAG(name, type, ReleaseStoreTag)
+
+#define DECL_RELEASE_ACQUIRE_ACCESSORS(name, type) \
+ DECL_ACQUIRE_GETTER(name, type) \
+ DECL_RELEASE_SETTER(name, type)
#define DECL_CAST(Type) \
V8_INLINE static Type cast(Object object); \
@@ -162,25 +184,55 @@
#define ACCESSORS(holder, name, type, offset) \
ACCESSORS_CHECKED(holder, name, type, offset, true)
-#define SYNCHRONIZED_ACCESSORS_CHECKED2(holder, name, type, offset, \
- get_condition, set_condition) \
- DEF_GETTER(holder, name, type) { \
+#define RELAXED_ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
+ set_condition) \
+ type holder::name(RelaxedLoadTag tag) const { \
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this); \
+ return holder::name(isolate, tag); \
+ } \
+ type holder::name(IsolateRoot isolate, RelaxedLoadTag) const { \
+ type value = TaggedField<type, offset>::load(isolate, *this); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void holder::set_##name(type value, RelaxedStoreTag, \
+ WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ TaggedField<type, offset>::store(*this, value); \
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
+ }
+
+#define RELAXED_ACCESSORS_CHECKED(holder, name, type, offset, condition) \
+ RELAXED_ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)
+
+#define RELAXED_ACCESSORS(holder, name, type, offset) \
+ RELAXED_ACCESSORS_CHECKED(holder, name, type, offset, true)
+
+#define RELEASE_ACQUIRE_ACCESSORS_CHECKED2(holder, name, type, offset, \
+ get_condition, set_condition) \
+ type holder::name(AcquireLoadTag tag) const { \
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this); \
+ return holder::name(isolate, tag); \
+ } \
+ type holder::name(IsolateRoot isolate, AcquireLoadTag) const { \
type value = TaggedField<type, offset>::Acquire_Load(isolate, *this); \
DCHECK(get_condition); \
return value; \
} \
- void holder::set_##name(type value, WriteBarrierMode mode) { \
+ void holder::set_##name(type value, ReleaseStoreTag, \
+ WriteBarrierMode mode) { \
DCHECK(set_condition); \
TaggedField<type, offset>::Release_Store(*this, value); \
CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
}
-#define SYNCHRONIZED_ACCESSORS_CHECKED(holder, name, type, offset, condition) \
- SYNCHRONIZED_ACCESSORS_CHECKED2(holder, name, type, offset, condition, \
- condition)
+#define RELEASE_ACQUIRE_ACCESSORS_CHECKED(holder, name, type, offset, \
+ condition) \
+ RELEASE_ACQUIRE_ACCESSORS_CHECKED2(holder, name, type, offset, condition, \
+ condition)
-#define SYNCHRONIZED_ACCESSORS(holder, name, type, offset) \
- SYNCHRONIZED_ACCESSORS_CHECKED(holder, name, type, offset, true)
+#define RELEASE_ACQUIRE_ACCESSORS(holder, name, type, offset) \
+ RELEASE_ACQUIRE_ACCESSORS_CHECKED(holder, name, type, offset, true)
#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
set_condition) \
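
The SYNCHRONIZED_* macros are renamed to RELEASE_ACQUIRE_* to say what they actually provide: a release store that pairs with an acquire load, so a reader that observes the new value also observes everything written before it. A sketch of that publication contract with std::atomic (illustrative only; the macros themselves go through V8's TaggedField helpers):

#include <atomic>
#include <cassert>
#include <thread>

int payload = 0;
std::atomic<bool> published{false};

void writer() {
  payload = 42;                                      // plain store
  published.store(true, std::memory_order_release);  // publish
}

void reader() {
  while (!published.load(std::memory_order_acquire)) {
    // spin until the writer publishes
  }
  // Guaranteed by the release/acquire pairing on `published`.
  assert(payload == 42);
}

int main() {
  std::thread t1(writer);
  std::thread t2(reader);
  t1.join();
  t2.join();
}
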
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 275ac9a9e6..a7571ae288 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -8,6 +8,7 @@
#include <algorithm>
#include "src/codegen/reloc-info.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/cell.h"
#include "src/objects/data-handler.h"
#include "src/objects/foreign-inl.h"
@@ -19,11 +20,9 @@
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/source-text-module.h"
#include "src/objects/synthetic-module.h"
+#include "src/objects/torque-defined-classes-inl.h"
#include "src/objects/transitions.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "torque-generated/class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/internal-class-definitions-inl.h"
namespace v8 {
namespace internal {
@@ -946,9 +945,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
p4);
case PROPERTY_ARRAY_TYPE:
return Op::template apply<PropertyArray::BodyDescriptor>(p1, p2, p3, p4);
- case DESCRIPTOR_ARRAY_TYPE:
- return Op::template apply<DescriptorArray::BodyDescriptor>(p1, p2, p3,
- p4);
case TRANSITION_ARRAY_TYPE:
return Op::template apply<TransitionArray::BodyDescriptor>(p1, p2, p3,
p4);
@@ -1032,8 +1028,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
return Op::template apply<WeakCell::BodyDescriptor>(p1, p2, p3, p4);
case JS_WEAK_REF_TYPE:
return Op::template apply<JSWeakRef::BodyDescriptor>(p1, p2, p3, p4);
- case ODDBALL_TYPE:
- return Op::template apply<Oddball::BodyDescriptor>(p1, p2, p3, p4);
case JS_PROXY_TYPE:
return Op::template apply<JSProxy::BodyDescriptor>(p1, p2, p3, p4);
case FOREIGN_TYPE:
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index 30d5bb6ec4..54fab1107a 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -135,6 +135,7 @@ namespace internal {
function_template_rare_data) \
V(_, INTERCEPTOR_INFO_TYPE, InterceptorInfo, interceptor_info) \
V(_, INTERPRETER_DATA_TYPE, InterpreterData, interpreter_data) \
+ V(_, MODULE_REQUEST_TYPE, ModuleRequest, module_request) \
V(_, PROMISE_CAPABILITY_TYPE, PromiseCapability, promise_capability) \
V(_, PROMISE_REACTION_TYPE, PromiseReaction, promise_reaction) \
V(_, PROPERTY_DESCRIPTOR_OBJECT_TYPE, PropertyDescriptorObject, \
@@ -148,8 +149,6 @@ namespace internal {
V(_, TEMPLATE_OBJECT_DESCRIPTION_TYPE, TemplateObjectDescription, \
template_object_description) \
V(_, TUPLE2_TYPE, Tuple2, tuple2) \
- V(_, WASM_CAPI_FUNCTION_DATA_TYPE, WasmCapiFunctionData, \
- wasm_capi_function_data) \
V(_, WASM_EXCEPTION_TAG_TYPE, WasmExceptionTag, wasm_exception_tag) \
V(_, WASM_EXPORTED_FUNCTION_DATA_TYPE, WasmExportedFunctionData, \
wasm_exported_function_data) \
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index c8ceea8f9a..65ac811e44 100644
--- a/deps/v8/src/objects/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -15,6 +15,7 @@
#include "src/base/bits.h"
#include "src/base/memory.h"
#include "src/builtins/builtins.h"
+#include "src/common/external-pointer-inl.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -42,7 +43,6 @@
#include "src/objects/tagged-index.h"
#include "src/objects/templates.h"
#include "src/sanitizer/tsan.h"
-#include "torque-generated/class-definitions-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -78,7 +78,7 @@ bool Object::IsTaggedIndex() const {
bool Object::Is##type_() const { \
return IsHeapObject() && HeapObject::cast(*this).Is##type_(); \
} \
- bool Object::Is##type_(const Isolate* isolate) const { \
+ bool Object::Is##type_(IsolateRoot isolate) const { \
return IsHeapObject() && HeapObject::cast(*this).Is##type_(isolate); \
}
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
@@ -232,23 +232,23 @@ DEF_GETTER(HeapObject, IsExternalTwoByteString, bool) {
bool Object::IsNumber() const {
if (IsSmi()) return true;
HeapObject this_heap_object = HeapObject::cast(*this);
- const Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ IsolateRoot isolate = GetIsolateForPtrCompr(this_heap_object);
return this_heap_object.IsHeapNumber(isolate);
}
-bool Object::IsNumber(const Isolate* isolate) const {
+bool Object::IsNumber(IsolateRoot isolate) const {
return IsSmi() || IsHeapNumber(isolate);
}
bool Object::IsNumeric() const {
if (IsSmi()) return true;
HeapObject this_heap_object = HeapObject::cast(*this);
- const Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ IsolateRoot isolate = GetIsolateForPtrCompr(this_heap_object);
return this_heap_object.IsHeapNumber(isolate) ||
this_heap_object.IsBigInt(isolate);
}
-bool Object::IsNumeric(const Isolate* isolate) const {
+bool Object::IsNumeric(IsolateRoot isolate) const {
return IsNumber(isolate) || IsBigInt(isolate);
}
@@ -276,11 +276,11 @@ DEF_GETTER(HeapObject, IsRegExpMatchInfo, bool) {
bool Object::IsLayoutDescriptor() const {
if (IsSmi()) return true;
HeapObject this_heap_object = HeapObject::cast(*this);
- const Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ IsolateRoot isolate = GetIsolateForPtrCompr(this_heap_object);
return this_heap_object.IsByteArray(isolate);
}
-bool Object::IsLayoutDescriptor(const Isolate* isolate) const {
+bool Object::IsLayoutDescriptor(IsolateRoot isolate) const {
return IsSmi() || IsByteArray(isolate);
}
@@ -385,11 +385,11 @@ DEF_GETTER(HeapObject, IsWasmExceptionPackage, bool) {
bool Object::IsPrimitive() const {
if (IsSmi()) return true;
HeapObject this_heap_object = HeapObject::cast(*this);
- const Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ IsolateRoot isolate = GetIsolateForPtrCompr(this_heap_object);
return this_heap_object.map(isolate).IsPrimitiveMap();
}
-bool Object::IsPrimitive(const Isolate* isolate) const {
+bool Object::IsPrimitive(IsolateRoot isolate) const {
return IsSmi() || HeapObject::cast(*this).map(isolate).IsPrimitiveMap();
}
@@ -419,7 +419,7 @@ DEF_GETTER(HeapObject, IsAccessCheckNeeded, bool) {
bool Object::Is##Name() const { \
return IsHeapObject() && HeapObject::cast(*this).Is##Name(); \
} \
- bool Object::Is##Name(const Isolate* isolate) const { \
+ bool Object::Is##Name(IsolateRoot isolate) const { \
return IsHeapObject() && HeapObject::cast(*this).Is##Name(isolate); \
}
STRUCT_LIST(MAKE_STRUCT_PREDICATE)
@@ -485,7 +485,7 @@ bool Object::FilterKey(PropertyFilter filter) {
return false;
}
-Representation Object::OptimalRepresentation(const Isolate* isolate) const {
+Representation Object::OptimalRepresentation(IsolateRoot isolate) const {
if (!FLAG_track_fields) return Representation::Tagged();
if (IsSmi()) {
return Representation::Smi();
@@ -504,7 +504,7 @@ Representation Object::OptimalRepresentation(const Isolate* isolate) const {
}
}
-ElementsKind Object::OptimalElementsKind(const Isolate* isolate) const {
+ElementsKind Object::OptimalElementsKind(IsolateRoot isolate) const {
if (IsSmi()) return PACKED_SMI_ELEMENTS;
if (IsNumber(isolate)) return PACKED_DOUBLE_ELEMENTS;
return PACKED_ELEMENTS;
@@ -640,12 +640,31 @@ MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
return value;
}
+void Object::InitExternalPointerField(size_t offset, Isolate* isolate) {
+ i::InitExternalPointerField(field_address(offset), isolate);
+}
+
+void Object::InitExternalPointerField(size_t offset, Isolate* isolate,
+ Address value, ExternalPointerTag tag) {
+ i::InitExternalPointerField(field_address(offset), isolate, value, tag);
+}
+
+Address Object::ReadExternalPointerField(size_t offset, IsolateRoot isolate,
+ ExternalPointerTag tag) const {
+ return i::ReadExternalPointerField(field_address(offset), isolate, tag);
+}
+
+void Object::WriteExternalPointerField(size_t offset, Isolate* isolate,
+ Address value, ExternalPointerTag tag) {
+ i::WriteExternalPointerField(field_address(offset), isolate, value, tag);
+}
+
ObjectSlot HeapObject::RawField(int byte_offset) const {
- return ObjectSlot(FIELD_ADDR(*this, byte_offset));
+ return ObjectSlot(field_address(byte_offset));
}
MaybeObjectSlot HeapObject::RawMaybeWeakField(int byte_offset) const {
- return MaybeObjectSlot(FIELD_ADDR(*this, byte_offset));
+ return MaybeObjectSlot(field_address(byte_offset));
}
MapWord MapWord::FromMap(const Map map) { return MapWord(map.ptr()); }
@@ -686,10 +705,10 @@ ReadOnlyRoots HeapObject::GetReadOnlyRoots() const {
return ReadOnlyHeap::GetReadOnlyRoots(*this);
}
-ReadOnlyRoots HeapObject::GetReadOnlyRoots(const Isolate* isolate) const {
+ReadOnlyRoots HeapObject::GetReadOnlyRoots(IsolateRoot isolate) const {
#ifdef V8_COMPRESS_POINTERS
- DCHECK_NOT_NULL(isolate);
- return ReadOnlyRoots(const_cast<Isolate*>(isolate));
+ DCHECK_NE(isolate.address(), 0);
+ return ReadOnlyRoots(Isolate::FromRootAddress(isolate.address()));
#else
return GetReadOnlyRoots();
#endif
@@ -775,8 +794,8 @@ void HeapObject::synchronized_set_map_word(MapWord map_word) {
MapField::Release_Store(*this, map_word);
}
-bool HeapObject::synchronized_compare_and_swap_map_word(MapWord old_map_word,
- MapWord new_map_word) {
+bool HeapObject::release_compare_and_swap_map_word(MapWord old_map_word,
+ MapWord new_map_word) {
Tagged_t result =
MapField::Release_CompareAndSwap(*this, old_map_word, new_map_word);
return result == static_cast<Tagged_t>(old_map_word.ptr());
@@ -905,7 +924,7 @@ AllocationAlignment HeapObject::RequiredAlignment(Map map) {
}
Address HeapObject::GetFieldAddress(int field_offset) const {
- return FIELD_ADDR(*this, field_offset);
+ return field_address(field_offset);
}
// static
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index bb33b5d097..5c67fa388f 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -51,7 +51,7 @@
#include "src/objects/bigint.h"
#include "src/objects/cell-inl.h"
#include "src/objects/code-inl.h"
-#include "src/objects/compilation-cache-inl.h"
+#include "src/objects/compilation-cache-table-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/elements.h"
#include "src/objects/embedder-data-array-inl.h"
@@ -63,6 +63,7 @@
#include "src/objects/free-space-inl.h"
#include "src/objects/function-kind.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/instance-type.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/keys.h"
#include "src/objects/lookup-inl.h"
@@ -126,9 +127,6 @@
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/zone.h"
-#include "torque-generated/class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/internal-class-definitions-inl.h"
namespace v8 {
namespace internal {
@@ -1304,7 +1302,7 @@ bool FunctionTemplateInfo::IsTemplateFor(Map map) {
Object type;
if (cons_obj.IsJSFunction()) {
JSFunction fun = JSFunction::cast(cons_obj);
- type = fun.shared().function_data();
+ type = fun.shared().function_data(kAcquireLoad);
} else if (cons_obj.IsFunctionTemplateInfo()) {
type = FunctionTemplateInfo::cast(cons_obj);
} else {
@@ -1459,7 +1457,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
if (info->replace_on_access() && receiver->IsJSReceiver()) {
RETURN_ON_EXCEPTION(isolate,
Accessors::ReplaceAccessorWithDataProperty(
- receiver, holder, name, result),
+ isolate, receiver, holder, name, result),
Object);
}
return reboxed_result;
@@ -1811,6 +1809,11 @@ bool Object::IterationHasObservableEffects() {
return true;
}
+bool Object::IsCodeLike(Isolate* isolate) const {
+ DisallowGarbageCollection no_gc;
+ return IsJSReceiver() && JSReceiver::cast(*this).IsCodeLike(isolate);
+}
+
void Object::ShortPrint(FILE* out) const {
OFStream os(out);
os << Brief(*this);
@@ -2241,7 +2244,8 @@ int HeapObject::SizeFromMap(Map map) const {
return FeedbackMetadata::SizeFor(
FeedbackMetadata::unchecked_cast(*this).synchronized_slot_count());
}
- if (instance_type == DESCRIPTOR_ARRAY_TYPE) {
+ if (base::IsInRange(instance_type, FIRST_DESCRIPTOR_ARRAY_TYPE,
+ LAST_DESCRIPTOR_ARRAY_TYPE)) {
return DescriptorArray::SizeFor(
DescriptorArray::unchecked_cast(*this).number_of_all_descriptors());
}
@@ -2304,8 +2308,14 @@ int HeapObject::SizeFromMap(Map map) const {
}
bool HeapObject::NeedsRehashing() const {
- switch (map().instance_type()) {
+ return NeedsRehashing(map().instance_type());
+}
+
+bool HeapObject::NeedsRehashing(InstanceType instance_type) const {
+ DCHECK_EQ(instance_type, map().instance_type());
+ switch (instance_type) {
case DESCRIPTOR_ARRAY_TYPE:
+ case STRONG_DESCRIPTOR_ARRAY_TYPE:
return DescriptorArray::cast(*this).number_of_descriptors() > 1;
case TRANSITION_ARRAY_TYPE:
return TransitionArray::cast(*this).number_of_entries() > 1;
@@ -2345,6 +2355,7 @@ bool HeapObject::CanBeRehashed() const {
case SIMPLE_NUMBER_DICTIONARY_TYPE:
return true;
case DESCRIPTOR_ARRAY_TYPE:
+ case STRONG_DESCRIPTOR_ARRAY_TYPE:
return true;
case TRANSITION_ARRAY_TYPE:
return true;
@@ -3508,11 +3519,20 @@ Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
return Just(true);
}
- Handle<NameDictionary> dict(proxy->property_dictionary(), isolate);
PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell);
- Handle<NameDictionary> result =
- NameDictionary::Add(isolate, dict, private_name, value, details);
- if (!dict.is_identical_to(result)) proxy->SetProperties(*result);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ Handle<OrderedNameDictionary> dict(proxy->property_dictionary_ordered(),
+ isolate);
+ Handle<OrderedNameDictionary> result =
+ OrderedNameDictionary::Add(isolate, dict, private_name, value, details)
+ .ToHandleChecked();
+ if (!dict.is_identical_to(result)) proxy->SetProperties(*result);
+ } else {
+ Handle<NameDictionary> dict(proxy->property_dictionary(), isolate);
+ Handle<NameDictionary> result =
+ NameDictionary::Add(isolate, dict, private_name, value, details);
+ if (!dict.is_identical_to(result)) proxy->SetProperties(*result);
+ }
return Just(true);
}
@@ -3946,7 +3966,7 @@ Handle<FixedArray> EnsureSpaceInFixedArray(Isolate* isolate,
int capacity = array->length();
if (capacity < length) {
int new_capacity = length;
- new_capacity = new_capacity + Max(new_capacity / 2, 2);
+ new_capacity = new_capacity + std::max(new_capacity / 2, 2);
int grow_by = new_capacity - capacity;
array = isolate->factory()->CopyFixedArrayAndGrow(array, grow_by);
}
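
The growth rule is now spelled with std::max: the array is resized to the requested length plus half of it again, with a minimum bump of two slots, which keeps repeated appends amortized. A tiny sketch of just that arithmetic:

#include <algorithm>
#include <cstdio>

// Same growth rule as the hunk above: requested length plus max(length/2, 2).
int GrowCapacity(int length) { return length + std::max(length / 2, 2); }

int main() {
  for (int length : {1, 4, 16, 100}) {
    std::printf("length %3d -> capacity %3d\n", length, GrowCapacity(length));
  }
}
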
@@ -4781,30 +4801,43 @@ bool Script::ContainsAsmModule() {
}
namespace {
-bool GetPositionInfoSlow(const Script script, int position,
- Script::PositionInfo* info) {
- if (!script.source().IsString()) return false;
- if (position < 0) position = 0;
- String source_string = String::cast(script.source());
+template <typename Char>
+bool GetPositionInfoSlowImpl(const Vector<Char>& source, int position,
+ Script::PositionInfo* info) {
+ if (position < 0) {
+ position = 0;
+ }
int line = 0;
- int line_start = 0;
- int len = source_string.length();
- for (int pos = 0; pos <= len; ++pos) {
- if (pos == len || source_string.Get(pos) == '\n') {
- if (position <= pos) {
- info->line = line;
- info->column = position - line_start;
- info->line_start = line_start;
- info->line_end = pos;
- return true;
- }
- line++;
- line_start = pos + 1;
+ const auto begin = std::cbegin(source);
+ const auto end = std::cend(source);
+ for (auto line_begin = begin; line_begin < end;) {
+ const auto line_end = std::find(line_begin, end, '\n');
+ if (position <= (line_end - begin)) {
+ info->line = line;
+ info->column = static_cast<int>((begin + position) - line_begin);
+ info->line_start = static_cast<int>(line_begin - begin);
+ info->line_end = static_cast<int>(line_end - begin);
+ return true;
}
+ ++line;
+ line_begin = line_end + 1;
}
return false;
}
+bool GetPositionInfoSlow(const Script script, int position,
+ const DisallowHeapAllocation& no_gc,
+ Script::PositionInfo* info) {
+ if (!script.source().IsString()) {
+ return false;
+ }
+ auto source = String::cast(script.source());
+ const auto flat = source.GetFlatContent(no_gc);
+ return flat.IsOneByte()
+ ? GetPositionInfoSlowImpl(flat.ToOneByteVector(), position, info)
+ : GetPositionInfoSlowImpl(flat.ToUC16Vector(), position, info);
+}
+
} // namespace
bool Script::GetPositionInfo(int position, PositionInfo* info,
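
The rewritten slow path flattens the source string once and then scans for '\n' a line at a time with std::find, deriving line, column, and line bounds from iterator distances. A standalone sketch of the same scan over a plain std::string (the Vector/GetFlatContent plumbing is V8-specific and omitted here):

#include <algorithm>
#include <cstdio>
#include <string>

struct PositionInfo {
  int line = -1;
  int column = -1;
  int line_start = -1;
  int line_end = -1;
};

// Scans line by line; a position past the end of the source reports no match.
bool GetPositionInfoSlow(const std::string& source, int position,
                         PositionInfo* info) {
  if (position < 0) position = 0;
  int line = 0;
  const auto begin = source.cbegin();
  const auto end = source.cend();
  for (auto line_begin = begin; line_begin < end;) {
    const auto line_end = std::find(line_begin, end, '\n');
    if (position <= line_end - begin) {
      info->line = line;
      info->column = static_cast<int>((begin + position) - line_begin);
      info->line_start = static_cast<int>(line_begin - begin);
      info->line_end = static_cast<int>(line_end - begin);
      return true;
    }
    if (line_end == end) break;  // last line scanned without a match
    ++line;
    line_begin = line_end + 1;
  }
  return false;
}

int main() {
  PositionInfo info;
  if (GetPositionInfoSlow("first\nsecond\nthird", 8, &info)) {
    std::printf("line %d column %d\n", info.line, info.column);  // line 1 column 2
  }
}
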
@@ -4826,7 +4859,9 @@ bool Script::GetPositionInfo(int position, PositionInfo* info,
if (line_ends().IsUndefined()) {
// Slow mode: we do not have line_ends. We have to iterate through source.
- if (!GetPositionInfoSlow(*this, position, info)) return false;
+ if (!GetPositionInfoSlow(*this, position, no_allocation, info)) {
+ return false;
+ }
} else {
DCHECK(line_ends().IsFixedArray());
FixedArray ends = FixedArray::cast(line_ends());
@@ -5109,9 +5144,11 @@ bool JSArray::MayHaveReadOnlyLength(Map js_array_map) {
// dictionary properties. Since it's not configurable, it's guaranteed to be
// the first in the descriptor array.
InternalIndex first(0);
- DCHECK(js_array_map.instance_descriptors().GetKey(first) ==
+ DCHECK(js_array_map.instance_descriptors(kRelaxedLoad).GetKey(first) ==
js_array_map.GetReadOnlyRoots().length_string());
- return js_array_map.instance_descriptors().GetDetails(first).IsReadOnly();
+ return js_array_map.instance_descriptors(kRelaxedLoad)
+ .GetDetails(first)
+ .IsReadOnly();
}
bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
@@ -5144,7 +5181,7 @@ bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index) {
template <typename Derived, typename Shape>
void Dictionary<Derived, Shape>::Print(std::ostream& os) {
DisallowHeapAllocation no_gc;
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
ReadOnlyRoots roots = this->GetReadOnlyRoots(isolate);
Derived dictionary = Derived::cast(*this);
for (InternalIndex i : dictionary.IterateEntries()) {
@@ -5198,65 +5235,6 @@ void Symbol::SymbolShortPrint(std::ostream& os) {
os << ">";
}
-// StringSharedKeys are used as keys in the eval cache.
-class StringSharedKey : public HashTableKey {
- public:
- // This tuple unambiguously identifies calls to eval() or
- // CreateDynamicFunction() (such as through the Function() constructor).
- // * source is the string passed into eval(). For dynamic functions, this is
- // the effective source for the function, some of which is implicitly
- // generated.
- // * shared is the shared function info for the function containing the call
- // to eval(). for dynamic functions, shared is the native context closure.
- // * When positive, position is the position in the source where eval is
- // called. When negative, position is the negation of the position in the
- // dynamic function's effective source where the ')' ends the parameters.
- StringSharedKey(Handle<String> source, Handle<SharedFunctionInfo> shared,
- LanguageMode language_mode, int position)
- : HashTableKey(CompilationCacheShape::StringSharedHash(
- *source, *shared, language_mode, position)),
- source_(source),
- shared_(shared),
- language_mode_(language_mode),
- position_(position) {}
-
- bool IsMatch(Object other) override {
- DisallowHeapAllocation no_allocation;
- if (!other.IsFixedArray()) {
- DCHECK(other.IsNumber());
- uint32_t other_hash = static_cast<uint32_t>(other.Number());
- return Hash() == other_hash;
- }
- FixedArray other_array = FixedArray::cast(other);
- SharedFunctionInfo shared = SharedFunctionInfo::cast(other_array.get(0));
- if (shared != *shared_) return false;
- int language_unchecked = Smi::ToInt(other_array.get(2));
- DCHECK(is_valid_language_mode(language_unchecked));
- LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
- if (language_mode != language_mode_) return false;
- int position = Smi::ToInt(other_array.get(3));
- if (position != position_) return false;
- String source = String::cast(other_array.get(1));
- return source.Equals(*source_);
- }
-
- Handle<Object> AsHandle(Isolate* isolate) {
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(4);
- array->set(0, *shared_);
- array->set(1, *source_);
- array->set(2, Smi::FromEnum(language_mode_));
- array->set(3, Smi::FromInt(position_));
- array->set_map(ReadOnlyRoots(isolate).fixed_cow_array_map());
- return array;
- }
-
- private:
- Handle<String> source_;
- Handle<SharedFunctionInfo> shared_;
- LanguageMode language_mode_;
- int position_;
-};
-
v8::Promise::PromiseState JSPromise::status() const {
int value = flags() & StatusBits::kMask;
DCHECK(value == 0 || value == 1 || value == 2);
@@ -5420,6 +5398,11 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
// 10. If then is an abrupt completion, then
Handle<Object> then_action;
if (!then.ToHandle(&then_action)) {
+ // The "then" lookup can cause termination.
+ if (!isolate->is_catchable_by_javascript(isolate->pending_exception())) {
+ return kNullMaybeHandle;
+ }
+
// a. Return RejectPromise(promise, then.[[Value]]).
Handle<Object> reason(isolate->pending_exception(), isolate);
isolate->clear_pending_exception();
@@ -5569,41 +5552,6 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
return isolate->factory()->undefined_value();
}
-// RegExpKey carries the source and flags of a regular expression as key.
-class RegExpKey : public HashTableKey {
- public:
- RegExpKey(Handle<String> string, JSRegExp::Flags flags)
- : HashTableKey(
- CompilationCacheShape::RegExpHash(*string, Smi::FromInt(flags))),
- string_(string),
- flags_(Smi::FromInt(flags)) {}
-
- // Rather than storing the key in the hash table, a pointer to the
- // stored value is stored where the key should be. IsMatch then
- // compares the search key to the found object, rather than comparing
- // a key to a key.
- bool IsMatch(Object obj) override {
- FixedArray val = FixedArray::cast(obj);
- return string_->Equals(String::cast(val.get(JSRegExp::kSourceIndex))) &&
- (flags_ == val.get(JSRegExp::kFlagsIndex));
- }
-
- Handle<String> string_;
- Smi flags_;
-};
-
-// CodeKey carries the SharedFunctionInfo key associated with a Code
-// object value.
-class CodeKey : public HashTableKey {
- public:
- explicit CodeKey(Handle<SharedFunctionInfo> key)
- : HashTableKey(key->Hash()), key_(key) {}
-
- bool IsMatch(Object string) override { return *key_ == string; }
-
- Handle<SharedFunctionInfo> key_;
-};
-
template <typename Derived, typename Shape>
void HashTable<Derived, Shape>::IteratePrefix(ObjectVisitor* v) {
BodyDescriptorBase::IteratePointers(*this, 0, kElementsStartOffset, v);
@@ -5650,8 +5598,7 @@ Handle<Derived> HashTable<Derived, Shape>::NewInternal(
}
template <typename Derived, typename Shape>
-void HashTable<Derived, Shape>::Rehash(const Isolate* isolate,
- Derived new_table) {
+void HashTable<Derived, Shape>::Rehash(IsolateRoot isolate, Derived new_table) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = new_table.GetWriteBarrierMode(no_gc);
@@ -5715,7 +5662,7 @@ void HashTable<Derived, Shape>::Swap(InternalIndex entry1, InternalIndex entry2,
}
template <typename Derived, typename Shape>
-void HashTable<Derived, Shape>::Rehash(const Isolate* isolate) {
+void HashTable<Derived, Shape>::Rehash(IsolateRoot isolate) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
ReadOnlyRoots roots = GetReadOnlyRoots(isolate);
@@ -5782,7 +5729,7 @@ Handle<Derived> HashTable<Derived, Shape>::EnsureCapacity(
isolate, new_nof,
should_pretenure ? AllocationType::kOld : AllocationType::kYoung);
- table->Rehash(GetIsolateForPtrCompr(isolate), *new_table);
+ table->Rehash(isolate, *new_table);
return new_table;
}
@@ -5848,8 +5795,9 @@ Handle<Derived> HashTable<Derived, Shape>::Shrink(Isolate* isolate,
}
template <typename Derived, typename Shape>
-InternalIndex HashTable<Derived, Shape>::FindInsertionEntry(
- const Isolate* isolate, ReadOnlyRoots roots, uint32_t hash) {
+InternalIndex HashTable<Derived, Shape>::FindInsertionEntry(IsolateRoot isolate,
+ ReadOnlyRoots roots,
+ uint32_t hash) {
uint32_t capacity = Capacity();
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
@@ -5898,328 +5846,6 @@ Handle<ObjectHashSet> ObjectHashSet::Add(Isolate* isolate,
return set;
}
-namespace {
-
-const int kLiteralEntryLength = 2;
-const int kLiteralInitialLength = 2;
-const int kLiteralContextOffset = 0;
-const int kLiteralLiteralsOffset = 1;
-
-int SearchLiteralsMapEntry(CompilationCacheTable cache, int cache_entry,
- Context native_context) {
- DisallowHeapAllocation no_gc;
- DCHECK(native_context.IsNativeContext());
- Object obj = cache.get(cache_entry);
-
- // Check that there's no confusion between FixedArray and WeakFixedArray (the
- // object used to be a FixedArray here).
- DCHECK(!obj.IsFixedArray());
- if (obj.IsWeakFixedArray()) {
- WeakFixedArray literals_map = WeakFixedArray::cast(obj);
- int length = literals_map.length();
- for (int i = 0; i < length; i += kLiteralEntryLength) {
- DCHECK(literals_map.Get(i + kLiteralContextOffset)->IsWeakOrCleared());
- if (literals_map.Get(i + kLiteralContextOffset) ==
- HeapObjectReference::Weak(native_context)) {
- return i;
- }
- }
- }
- return -1;
-}
-
-void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
- Handle<Context> native_context,
- Handle<FeedbackCell> feedback_cell) {
- Isolate* isolate = native_context->GetIsolate();
- DCHECK(native_context->IsNativeContext());
- STATIC_ASSERT(kLiteralEntryLength == 2);
- Handle<WeakFixedArray> new_literals_map;
- int entry;
-
- Object obj = cache->get(cache_entry);
-
- // Check that there's no confusion between FixedArray and WeakFixedArray (the
- // object used to be a FixedArray here).
- DCHECK(!obj.IsFixedArray());
- if (!obj.IsWeakFixedArray() || WeakFixedArray::cast(obj).length() == 0) {
- new_literals_map = isolate->factory()->NewWeakFixedArray(
- kLiteralInitialLength, AllocationType::kOld);
- entry = 0;
- } else {
- Handle<WeakFixedArray> old_literals_map(WeakFixedArray::cast(obj), isolate);
- entry = SearchLiteralsMapEntry(*cache, cache_entry, *native_context);
- if (entry >= 0) {
- // Just set the code of the entry.
- old_literals_map->Set(entry + kLiteralLiteralsOffset,
- HeapObjectReference::Weak(*feedback_cell));
- return;
- }
-
- // Can we reuse an entry?
- DCHECK_LT(entry, 0);
- int length = old_literals_map->length();
- for (int i = 0; i < length; i += kLiteralEntryLength) {
- if (old_literals_map->Get(i + kLiteralContextOffset)->IsCleared()) {
- new_literals_map = old_literals_map;
- entry = i;
- break;
- }
- }
-
- if (entry < 0) {
- // Copy old optimized code map and append one new entry.
- new_literals_map = isolate->factory()->CopyWeakFixedArrayAndGrow(
- old_literals_map, kLiteralEntryLength);
- entry = old_literals_map->length();
- }
- }
-
- new_literals_map->Set(entry + kLiteralContextOffset,
- HeapObjectReference::Weak(*native_context));
- new_literals_map->Set(entry + kLiteralLiteralsOffset,
- HeapObjectReference::Weak(*feedback_cell));
-
-#ifdef DEBUG
- for (int i = 0; i < new_literals_map->length(); i += kLiteralEntryLength) {
- MaybeObject object = new_literals_map->Get(i + kLiteralContextOffset);
- DCHECK(object->IsCleared() ||
- object->GetHeapObjectAssumeWeak().IsNativeContext());
- object = new_literals_map->Get(i + kLiteralLiteralsOffset);
- DCHECK(object->IsCleared() ||
- object->GetHeapObjectAssumeWeak().IsFeedbackCell());
- }
-#endif
-
- Object old_literals_map = cache->get(cache_entry);
- if (old_literals_map != *new_literals_map) {
- cache->set(cache_entry, *new_literals_map);
- }
-}
-
-FeedbackCell SearchLiteralsMap(CompilationCacheTable cache, int cache_entry,
- Context native_context) {
- FeedbackCell result;
- int entry = SearchLiteralsMapEntry(cache, cache_entry, native_context);
- if (entry >= 0) {
- WeakFixedArray literals_map = WeakFixedArray::cast(cache.get(cache_entry));
- DCHECK_LE(entry + kLiteralEntryLength, literals_map.length());
- MaybeObject object = literals_map.Get(entry + kLiteralLiteralsOffset);
-
- if (!object->IsCleared()) {
- result = FeedbackCell::cast(object->GetHeapObjectAssumeWeak());
- }
- }
- DCHECK(result.is_null() || result.IsFeedbackCell());
- return result;
-}
-
-} // namespace
-
-MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
- Handle<CompilationCacheTable> table, Handle<String> src,
- Handle<Context> native_context, LanguageMode language_mode) {
- // We use the empty function SFI as part of the key. Although the
- // empty_function is native context dependent, the SFI is de-duped on
- // snapshot builds by the StartupObjectCache, and so this does not prevent
- // reuse of scripts in the compilation cache across native contexts.
- Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
- native_context->GetIsolate());
- Isolate* isolate = native_context->GetIsolate();
- src = String::Flatten(isolate, src);
- StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
- InternalIndex entry = table->FindEntry(isolate, &key);
- if (entry.is_not_found()) return MaybeHandle<SharedFunctionInfo>();
- int index = EntryToIndex(entry);
- if (!table->get(index).IsFixedArray()) {
- return MaybeHandle<SharedFunctionInfo>();
- }
- Object obj = table->get(index + 1);
- if (obj.IsSharedFunctionInfo()) {
- return handle(SharedFunctionInfo::cast(obj), native_context->GetIsolate());
- }
- return MaybeHandle<SharedFunctionInfo>();
-}
-
-InfoCellPair CompilationCacheTable::LookupEval(
- Handle<CompilationCacheTable> table, Handle<String> src,
- Handle<SharedFunctionInfo> outer_info, Handle<Context> native_context,
- LanguageMode language_mode, int position) {
- InfoCellPair empty_result;
- Isolate* isolate = native_context->GetIsolate();
- src = String::Flatten(isolate, src);
- StringSharedKey key(src, outer_info, language_mode, position);
- InternalIndex entry = table->FindEntry(isolate, &key);
- if (entry.is_not_found()) return empty_result;
- int index = EntryToIndex(entry);
- if (!table->get(index).IsFixedArray()) return empty_result;
- Object obj = table->get(index + 1);
- if (obj.IsSharedFunctionInfo()) {
- FeedbackCell feedback_cell =
- SearchLiteralsMap(*table, index + 2, *native_context);
- return InfoCellPair(isolate, SharedFunctionInfo::cast(obj), feedback_cell);
- }
- return empty_result;
-}
-
-Handle<Object> CompilationCacheTable::LookupRegExp(Handle<String> src,
- JSRegExp::Flags flags) {
- Isolate* isolate = GetIsolate();
- DisallowHeapAllocation no_allocation;
- RegExpKey key(src, flags);
- InternalIndex entry = FindEntry(isolate, &key);
- if (entry.is_not_found()) return isolate->factory()->undefined_value();
- return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
-}
-
-MaybeHandle<Code> CompilationCacheTable::LookupCode(
- Handle<SharedFunctionInfo> key) {
- Isolate* isolate = GetIsolate();
- DisallowHeapAllocation no_allocation;
- CodeKey k(key);
- InternalIndex entry = FindEntry(isolate, &k);
- if (entry.is_not_found()) return {};
- return Handle<Code>(Code::cast(get(EntryToIndex(entry) + 1)), isolate);
-}
-
-Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
- Handle<CompilationCacheTable> cache, Handle<String> src,
- Handle<Context> native_context, LanguageMode language_mode,
- Handle<SharedFunctionInfo> value) {
- Isolate* isolate = native_context->GetIsolate();
- // We use the empty function SFI as part of the key. Although the
- // empty_function is native context dependent, the SFI is de-duped on
- // snapshot builds by the StartupObjectCache, and so this does not prevent
- // reuse of scripts in the compilation cache across native contexts.
- Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
- isolate);
- src = String::Flatten(isolate, src);
- StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
- Handle<Object> k = key.AsHandle(isolate);
- cache = EnsureCapacity(isolate, cache);
- InternalIndex entry = cache->FindInsertionEntry(isolate, key.Hash());
- cache->set(EntryToIndex(entry), *k);
- cache->set(EntryToIndex(entry) + 1, *value);
- cache->ElementAdded();
- return cache;
-}
-
-Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
- Handle<CompilationCacheTable> cache, Handle<String> src,
- Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
- Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
- int position) {
- Isolate* isolate = native_context->GetIsolate();
- src = String::Flatten(isolate, src);
- StringSharedKey key(src, outer_info, value->language_mode(), position);
- {
- Handle<Object> k = key.AsHandle(isolate);
- InternalIndex entry = cache->FindEntry(isolate, &key);
- if (entry.is_found()) {
- cache->set(EntryToIndex(entry), *k);
- cache->set(EntryToIndex(entry) + 1, *value);
- // AddToFeedbackCellsMap may allocate a new sub-array to live in the
- // entry, but it won't change the cache array. Therefore EntryToIndex
- // and entry remains correct.
- AddToFeedbackCellsMap(cache, EntryToIndex(entry) + 2, native_context,
- feedback_cell);
- // Add hash again even on cache hit to avoid unnecessary cache delay in
- // case of hash collisions.
- }
- }
-
- cache = EnsureCapacity(isolate, cache);
- InternalIndex entry = cache->FindInsertionEntry(isolate, key.Hash());
- Handle<Object> k =
- isolate->factory()->NewNumber(static_cast<double>(key.Hash()));
- cache->set(EntryToIndex(entry), *k);
- cache->set(EntryToIndex(entry) + 1, Smi::FromInt(kHashGenerations));
- cache->ElementAdded();
- return cache;
-}
-
-Handle<CompilationCacheTable> CompilationCacheTable::PutRegExp(
- Isolate* isolate, Handle<CompilationCacheTable> cache, Handle<String> src,
- JSRegExp::Flags flags, Handle<FixedArray> value) {
- RegExpKey key(src, flags);
- cache = EnsureCapacity(isolate, cache);
- InternalIndex entry = cache->FindInsertionEntry(isolate, key.Hash());
- // We store the value in the key slot, and compare the search key
- // to the stored value with a custom IsMatch function during lookups.
- cache->set(EntryToIndex(entry), *value);
- cache->set(EntryToIndex(entry) + 1, *value);
- cache->ElementAdded();
- return cache;
-}
-
-Handle<CompilationCacheTable> CompilationCacheTable::PutCode(
- Isolate* isolate, Handle<CompilationCacheTable> cache,
- Handle<SharedFunctionInfo> key, Handle<Code> value) {
- CodeKey k(key);
-
- {
- InternalIndex entry = cache->FindEntry(isolate, &k);
- if (entry.is_found()) {
- // Update.
- cache->set(EntryToIndex(entry), *key);
- cache->set(EntryToIndex(entry) + 1, *value);
- return cache;
- }
- }
-
- // Insert.
- cache = EnsureCapacity(isolate, cache);
- InternalIndex entry = cache->FindInsertionEntry(isolate, k.Hash());
- cache->set(EntryToIndex(entry), *key);
- cache->set(EntryToIndex(entry) + 1, *value);
- cache->ElementAdded();
- return cache;
-}
-
-void CompilationCacheTable::Age() {
- DisallowHeapAllocation no_allocation;
- Object the_hole_value = GetReadOnlyRoots().the_hole_value();
- for (InternalIndex entry : IterateEntries()) {
- int entry_index = EntryToIndex(entry);
- int value_index = entry_index + 1;
-
- if (get(entry_index).IsNumber()) {
- Smi count = Smi::cast(get(value_index));
- count = Smi::FromInt(count.value() - 1);
- if (count.value() == 0) {
- NoWriteBarrierSet(*this, entry_index, the_hole_value);
- NoWriteBarrierSet(*this, value_index, the_hole_value);
- ElementRemoved();
- } else {
- NoWriteBarrierSet(*this, value_index, count);
- }
- } else if (get(entry_index).IsFixedArray()) {
- SharedFunctionInfo info = SharedFunctionInfo::cast(get(value_index));
- if (info.IsInterpreted() && info.GetBytecodeArray().IsOld()) {
- for (int i = 0; i < kEntrySize; i++) {
- NoWriteBarrierSet(*this, entry_index + i, the_hole_value);
- }
- ElementRemoved();
- }
- }
- }
-}
-
-void CompilationCacheTable::Remove(Object value) {
- DisallowHeapAllocation no_allocation;
- Object the_hole_value = GetReadOnlyRoots().the_hole_value();
- for (InternalIndex entry : IterateEntries()) {
- int entry_index = EntryToIndex(entry);
- int value_index = entry_index + 1;
- if (get(value_index) == value) {
- for (int i = 0; i < kEntrySize; i++) {
- NoWriteBarrierSet(*this, entry_index + i, the_hole_value);
- }
- ElementRemoved();
- }
- }
-}
-
template <typename Derived, typename Shape>
template <typename LocalIsolate>
Handle<Derived> BaseNameDictionary<Derived, Shape>::New(
@@ -6340,8 +5966,7 @@ Handle<Derived> Dictionary<Derived, Shape>::Add(LocalIsolate* isolate,
// Compute the key object.
Handle<Object> k = Shape::AsHandle(isolate, key);
- InternalIndex entry = dictionary->FindInsertionEntry(
- GetIsolateForPtrCompr(isolate), roots, hash);
+ InternalIndex entry = dictionary->FindInsertionEntry(isolate, roots, hash);
dictionary->SetEntry(entry, *k, *value, details);
DCHECK(dictionary->KeyAt(isolate, entry).IsNumber() ||
Shape::Unwrap(dictionary->KeyAt(isolate, entry)).IsUniqueName());
@@ -6422,71 +6047,6 @@ int Dictionary<Derived, Shape>::NumberOfEnumerableProperties() {
return result;
}
-template <typename Dictionary>
-struct EnumIndexComparator {
- explicit EnumIndexComparator(Dictionary dict) : dict(dict) {}
- bool operator()(Tagged_t a, Tagged_t b) {
- PropertyDetails da(
- dict.DetailsAt(InternalIndex(Smi(static_cast<Address>(a)).value())));
- PropertyDetails db(
- dict.DetailsAt(InternalIndex(Smi(static_cast<Address>(b)).value())));
- return da.dictionary_index() < db.dictionary_index();
- }
- Dictionary dict;
-};
-
-template <typename Derived, typename Shape>
-void BaseNameDictionary<Derived, Shape>::CopyEnumKeysTo(
- Isolate* isolate, Handle<Derived> dictionary, Handle<FixedArray> storage,
- KeyCollectionMode mode, KeyAccumulator* accumulator) {
- DCHECK_IMPLIES(mode != KeyCollectionMode::kOwnOnly, accumulator != nullptr);
- int length = storage->length();
- int properties = 0;
- ReadOnlyRoots roots(isolate);
- {
- AllowHeapAllocation allow_gc;
- for (InternalIndex i : dictionary->IterateEntries()) {
- Object key;
- if (!dictionary->ToKey(roots, i, &key)) continue;
- bool is_shadowing_key = false;
- if (key.IsSymbol()) continue;
- PropertyDetails details = dictionary->DetailsAt(i);
- if (details.IsDontEnum()) {
- if (mode == KeyCollectionMode::kIncludePrototypes) {
- is_shadowing_key = true;
- } else {
- continue;
- }
- }
- if (is_shadowing_key) {
- // This might allocate, but {key} is not used afterwards.
- accumulator->AddShadowingKey(key, &allow_gc);
- continue;
- } else {
- storage->set(properties, Smi::FromInt(i.as_int()));
- }
- properties++;
- if (mode == KeyCollectionMode::kOwnOnly && properties == length) break;
- }
- }
-
- CHECK_EQ(length, properties);
- {
- DisallowHeapAllocation no_gc;
- Derived raw_dictionary = *dictionary;
- FixedArray raw_storage = *storage;
- EnumIndexComparator<Derived> cmp(raw_dictionary);
- // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
- // store operations that are safe for concurrent marking.
- AtomicSlot start(storage->GetFirstElementAddress());
- std::sort(start, start + length, cmp);
- for (int i = 0; i < length; i++) {
- InternalIndex index(Smi::ToInt(raw_storage.get(i)));
- raw_storage.set(i, raw_dictionary.NameAt(index));
- }
- }
-}
-
template <typename Derived, typename Shape>
Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
Isolate* isolate, Handle<Derived> dictionary) {
@@ -6520,71 +6080,6 @@ Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
return FixedArray::ShrinkOrEmpty(isolate, array, array_size);
}
-template <typename Derived, typename Shape>
-ExceptionStatus BaseNameDictionary<Derived, Shape>::CollectKeysTo(
- Handle<Derived> dictionary, KeyAccumulator* keys) {
- Isolate* isolate = keys->isolate();
- ReadOnlyRoots roots(isolate);
- // TODO(jkummerow): Consider using a std::unique_ptr<InternalIndex[]> instead.
- Handle<FixedArray> array =
- isolate->factory()->NewFixedArray(dictionary->NumberOfElements());
- int array_size = 0;
- PropertyFilter filter = keys->filter();
- // Handle enumerable strings in CopyEnumKeysTo.
- DCHECK_NE(keys->filter(), ENUMERABLE_STRINGS);
- {
- DisallowHeapAllocation no_gc;
- for (InternalIndex i : dictionary->IterateEntries()) {
- Object key;
- Derived raw_dictionary = *dictionary;
- if (!raw_dictionary.ToKey(roots, i, &key)) continue;
- if (key.FilterKey(filter)) continue;
- PropertyDetails details = raw_dictionary.DetailsAt(i);
- if ((details.attributes() & filter) != 0) {
- AllowHeapAllocation gc;
- // This might allocate, but {key} is not used afterwards.
- keys->AddShadowingKey(key, &gc);
- continue;
- }
- if (filter & ONLY_ALL_CAN_READ) {
- if (details.kind() != kAccessor) continue;
- Object accessors = raw_dictionary.ValueAt(i);
- if (!accessors.IsAccessorInfo()) continue;
- if (!AccessorInfo::cast(accessors).all_can_read()) continue;
- }
- array->set(array_size++, Smi::FromInt(i.as_int()));
- }
-
- EnumIndexComparator<Derived> cmp(*dictionary);
- // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
- // store operations that are safe for concurrent marking.
- AtomicSlot start(array->GetFirstElementAddress());
- std::sort(start, start + array_size, cmp);
- }
-
- bool has_seen_symbol = false;
- for (int i = 0; i < array_size; i++) {
- InternalIndex index(Smi::ToInt(array->get(i)));
- Object key = dictionary->NameAt(index);
- if (key.IsSymbol()) {
- has_seen_symbol = true;
- continue;
- }
- ExceptionStatus status = keys->AddKey(key, DO_NOT_CONVERT);
- if (!status) return status;
- }
- if (has_seen_symbol) {
- for (int i = 0; i < array_size; i++) {
- InternalIndex index(Smi::ToInt(array->get(i)));
- Object key = dictionary->NameAt(index);
- if (!key.IsSymbol()) continue;
- ExceptionStatus status = keys->AddKey(key, DO_NOT_CONVERT);
- if (!status) return status;
- }
- }
- return ExceptionStatus::kSuccess;
-}
-
// Backwards lookup (slow).
template <typename Derived, typename Shape>
Object Dictionary<Derived, Shape>::SlowReverseLookup(Object value) {
@@ -6609,7 +6104,7 @@ void ObjectHashTableBase<Derived, Shape>::FillEntriesWithHoles(
}
template <typename Derived, typename Shape>
-Object ObjectHashTableBase<Derived, Shape>::Lookup(const Isolate* isolate,
+Object ObjectHashTableBase<Derived, Shape>::Lookup(IsolateRoot isolate,
Handle<Object> key,
int32_t hash) {
DisallowHeapAllocation no_gc;
@@ -6625,7 +6120,7 @@ template <typename Derived, typename Shape>
Object ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key) {
DisallowHeapAllocation no_gc;
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
ReadOnlyRoots roots = this->GetReadOnlyRoots(isolate);
DCHECK(this->IsKey(roots, *key));
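For orientation, a minimal sketch of the calling pattern implied by the Lookup signature change above, assuming an ObjectHashTable value `table`, a `Handle<Object> key`, and its `int32_t hash` are already in scope (the variable names are placeholders, not code from this patch):

    IsolateRoot isolate = GetIsolateForPtrCompr(table);
    Object value = table.Lookup(isolate, key, hash);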
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index 94bcb9a479..81117c24db 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -281,7 +281,7 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
#define IS_TYPE_FUNCTION_DECL(Type) \
V8_INLINE bool Is##Type() const; \
- V8_INLINE bool Is##Type(const Isolate* isolate) const;
+ V8_INLINE bool Is##Type(IsolateRoot isolate) const;
OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
IS_TYPE_FUNCTION_DECL(HashTableBase)
@@ -309,7 +309,7 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
#define DECL_STRUCT_PREDICATE(NAME, Name, name) \
V8_INLINE bool Is##Name() const; \
- V8_INLINE bool Is##Name(const Isolate* isolate) const;
+ V8_INLINE bool Is##Name(IsolateRoot isolate) const;
STRUCT_LIST(DECL_STRUCT_PREDICATE)
#undef DECL_STRUCT_PREDICATE
@@ -324,9 +324,9 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
V8_EXPORT_PRIVATE bool ToInt32(int32_t* value);
inline bool ToUint32(uint32_t* value) const;
- inline Representation OptimalRepresentation(const Isolate* isolate) const;
+ inline Representation OptimalRepresentation(IsolateRoot isolate) const;
- inline ElementsKind OptimalElementsKind(const Isolate* isolate) const;
+ inline ElementsKind OptimalElementsKind(IsolateRoot isolate) const;
inline bool FitsRepresentation(Representation representation);
@@ -586,6 +586,9 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
// and length.
bool IterationHasObservableEffects();
+ // TC39 "Dynamic Code Brand Checks"
+ bool IsCodeLike(Isolate* isolate) const;
+
EXPORT_DECL_VERIFIER(Object)
#ifdef VERIFY_HEAP
@@ -666,6 +669,17 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
}
}
+ //
+ // ExternalPointer_t field accessors.
+ //
+ inline void InitExternalPointerField(size_t offset, Isolate* isolate);
+ inline void InitExternalPointerField(size_t offset, Isolate* isolate,
+ Address value, ExternalPointerTag tag);
+ inline Address ReadExternalPointerField(size_t offset, IsolateRoot isolate,
+ ExternalPointerTag tag) const;
+ inline void WriteExternalPointerField(size_t offset, Isolate* isolate,
+ Address value, ExternalPointerTag tag);
+
protected:
inline Address field_address(size_t offset) const {
return ptr() + offset - kHeapObjectTag;
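A hedged sketch of how a HeapObject subclass might wrap the ExternalPointer_t accessors declared above; `HypotheticalObject`, `kFooOffset`, and `kFooTag` are illustrative placeholders, not identifiers from this patch:

    // Illustrative wrappers; the offset and tag constants are assumed to be
    // defined by the embedding class.
    void HypotheticalObject::set_foo(Isolate* isolate, Address value) {
      WriteExternalPointerField(kFooOffset, isolate, value, kFooTag);
    }
    Address HypotheticalObject::foo(IsolateRoot isolate) const {
      return ReadExternalPointerField(kFooOffset, isolate, kFooTag);
    }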
diff --git a/deps/v8/src/objects/oddball-inl.h b/deps/v8/src/objects/oddball-inl.h
index 4b274097b8..4a022831be 100644
--- a/deps/v8/src/objects/oddball-inl.h
+++ b/deps/v8/src/objects/oddball-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/oddball-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(Oddball)
void Oddball::set_to_number_raw_as_bits(uint64_t bits) {
diff --git a/deps/v8/src/objects/oddball.h b/deps/v8/src/objects/oddball.h
index 5f0c7ce001..30f6fa70f8 100644
--- a/deps/v8/src/objects/oddball.h
+++ b/deps/v8/src/objects/oddball.h
@@ -13,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/oddball-tq.inc"
+
// The Oddball describes objects null, undefined, true, and false.
class Oddball : public TorqueGeneratedOddball<Oddball, PrimitiveHeapObject> {
public:
@@ -49,10 +51,7 @@ class Oddball : public TorqueGeneratedOddball<Oddball, PrimitiveHeapObject> {
static const byte kSelfReferenceMarker = 10;
static const byte kBasicBlockCountersMarker = 11;
- static_assert(kStartOfWeakFieldsOffset == kEndOfWeakFieldsOffset,
- "Ensure BodyDescriptor does not need to handle weak fields.");
- using BodyDescriptor = FixedBodyDescriptor<kStartOfStrongFieldsOffset,
- kEndOfStrongFieldsOffset, kSize>;
+ class BodyDescriptor;
STATIC_ASSERT(kKindOffset == Internals::kOddballKindOffset);
STATIC_ASSERT(kNull == Internals::kNullOddballKind);
diff --git a/deps/v8/src/objects/oddball.tq b/deps/v8/src/objects/oddball.tq
index 44a3d2aa51..d111779a31 100644
--- a/deps/v8/src/objects/oddball.tq
+++ b/deps/v8/src/objects/oddball.tq
@@ -2,10 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
+@export
+@customCppClass
+@customMap // Oddballs have one of multiple maps, depending on the kind.
@apiExposedInstanceTypeValue(0x43)
@highestInstanceTypeWithinParentClassRange
-extern class Oddball extends PrimitiveHeapObject {
+class Oddball extends PrimitiveHeapObject {
to_number_raw: float64;
to_string: String;
to_number: Number;
diff --git a/deps/v8/src/objects/ordered-hash-table-inl.h b/deps/v8/src/objects/ordered-hash-table-inl.h
index 6edd5c3cda..959d7a7801 100644
--- a/deps/v8/src/objects/ordered-hash-table-inl.h
+++ b/deps/v8/src/objects/ordered-hash-table-inl.h
@@ -31,6 +31,12 @@ template <class Derived, int entrysize>
OrderedHashTable<Derived, entrysize>::OrderedHashTable(Address ptr)
: FixedArray(ptr) {}
+template <class Derived, int entrysize>
+bool OrderedHashTable<Derived, entrysize>::IsKey(ReadOnlyRoots roots,
+ Object k) {
+ return k != roots.the_hole_value();
+}
+
OrderedHashSet::OrderedHashSet(Address ptr)
: OrderedHashTable<OrderedHashSet, 1>(ptr) {
SLOW_DCHECK(IsOrderedHashSet());
@@ -51,9 +57,9 @@ SmallOrderedHashTable<Derived>::SmallOrderedHashTable(Address ptr)
: HeapObject(ptr) {}
template <class Derived>
-Object SmallOrderedHashTable<Derived>::KeyAt(int entry) const {
- DCHECK_LT(entry, Capacity());
- Offset entry_offset = GetDataEntryOffset(entry, Derived::kKeyIndex);
+Object SmallOrderedHashTable<Derived>::KeyAt(InternalIndex entry) const {
+ DCHECK_LT(entry.as_int(), Capacity());
+ Offset entry_offset = GetDataEntryOffset(entry.as_int(), Derived::kKeyIndex);
return TaggedField<Object>::load(*this, entry_offset);
}
@@ -97,63 +103,65 @@ Handle<Map> SmallOrderedHashSet::GetMap(ReadOnlyRoots roots) {
return roots.small_ordered_hash_set_map_handle();
}
-inline Object OrderedHashMap::ValueAt(int entry) {
- DCHECK_NE(entry, kNotFound);
- DCHECK_LT(entry, UsedCapacity());
+inline Object OrderedHashMap::ValueAt(InternalIndex entry) {
+ DCHECK_LT(entry.as_int(), UsedCapacity());
return get(EntryToIndex(entry) + kValueOffset);
}
-inline Object OrderedNameDictionary::ValueAt(int entry) {
- DCHECK_NE(entry, kNotFound);
- DCHECK_LT(entry, UsedCapacity());
+inline Object OrderedNameDictionary::ValueAt(InternalIndex entry) {
+ DCHECK_LT(entry.as_int(), UsedCapacity());
return get(EntryToIndex(entry) + kValueOffset);
}
+Name OrderedNameDictionary::NameAt(InternalIndex entry) {
+ return Name::cast(KeyAt(entry));
+}
+
// Set the value for entry.
-inline void OrderedNameDictionary::ValueAtPut(int entry, Object value) {
- DCHECK_NE(entry, kNotFound);
- DCHECK_LT(entry, UsedCapacity());
+inline void OrderedNameDictionary::ValueAtPut(InternalIndex entry,
+ Object value) {
+ DCHECK_LT(entry.as_int(), UsedCapacity());
this->set(EntryToIndex(entry) + kValueOffset, value);
}
// Returns the property details for the property at entry.
-inline PropertyDetails OrderedNameDictionary::DetailsAt(int entry) {
- DCHECK_NE(entry, kNotFound);
- DCHECK_LT(entry, this->UsedCapacity());
+inline PropertyDetails OrderedNameDictionary::DetailsAt(InternalIndex entry) {
+ DCHECK_LT(entry.as_int(), this->UsedCapacity());
// TODO(gsathya): Optimize the cast away.
return PropertyDetails(
Smi::cast(get(EntryToIndex(entry) + kPropertyDetailsOffset)));
}
-inline void OrderedNameDictionary::DetailsAtPut(int entry,
+inline void OrderedNameDictionary::DetailsAtPut(InternalIndex entry,
PropertyDetails value) {
- DCHECK_NE(entry, kNotFound);
- DCHECK_LT(entry, this->UsedCapacity());
+ DCHECK_LT(entry.as_int(), this->UsedCapacity());
// TODO(gsathya): Optimize the cast away.
this->set(EntryToIndex(entry) + kPropertyDetailsOffset, value.AsSmi());
}
-inline Object SmallOrderedNameDictionary::ValueAt(int entry) {
- return this->GetDataEntry(entry, kValueIndex);
+inline Object SmallOrderedNameDictionary::ValueAt(InternalIndex entry) {
+ return this->GetDataEntry(entry.as_int(), kValueIndex);
}
// Set the value for entry.
-inline void SmallOrderedNameDictionary::ValueAtPut(int entry, Object value) {
- this->SetDataEntry(entry, kValueIndex, value);
+inline void SmallOrderedNameDictionary::ValueAtPut(InternalIndex entry,
+ Object value) {
+ this->SetDataEntry(entry.as_int(), kValueIndex, value);
}
// Returns the property details for the property at entry.
-inline PropertyDetails SmallOrderedNameDictionary::DetailsAt(int entry) {
+inline PropertyDetails SmallOrderedNameDictionary::DetailsAt(
+ InternalIndex entry) {
// TODO(gsathya): Optimize the cast away. And store this in the data table.
return PropertyDetails(
- Smi::cast(this->GetDataEntry(entry, kPropertyDetailsIndex)));
+ Smi::cast(this->GetDataEntry(entry.as_int(), kPropertyDetailsIndex)));
}
// Set the details for entry.
-inline void SmallOrderedNameDictionary::DetailsAtPut(int entry,
+inline void SmallOrderedNameDictionary::DetailsAtPut(InternalIndex entry,
PropertyDetails value) {
// TODO(gsathya): Optimize the cast away. And store this in the data table.
- this->SetDataEntry(entry, kPropertyDetailsIndex, value.AsSmi());
+ this->SetDataEntry(entry.as_int(), kPropertyDetailsIndex, value.AsSmi());
}
inline bool OrderedHashSet::Is(Handle<HeapObject> table) {
@@ -193,7 +201,9 @@ template <class Derived, class TableType>
Object OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
TableType table = TableType::cast(this->table());
int index = Smi::ToInt(this->index());
- Object key = table.KeyAt(index);
+ DCHECK_LE(0, index);
+ InternalIndex entry(index);
+ Object key = table.KeyAt(entry);
DCHECK(!key.IsTheHole());
return key;
}
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index d3250bd92d..15673daa62 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -6,6 +6,7 @@
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
+#include "src/objects/internal-index.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
@@ -22,7 +23,8 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
// from number of buckets. If we decide to change kLoadFactor
// to something other than 2, capacity should be stored as another
// field of this object.
- capacity = base::bits::RoundUpToPowerOfTwo32(Max(kMinCapacity, capacity));
+ capacity =
+ base::bits::RoundUpToPowerOfTwo32(std::max({kInitialCapacity, capacity}));
if (capacity > MaxCapacity()) {
return MaybeHandle<Derived>();
}
@@ -42,6 +44,24 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
}
template <class Derived, int entrysize>
+MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::AllocateEmpty(
+ Isolate* isolate, AllocationType allocation, RootIndex root_index) {
+ // This is only supposed to be used to create the canonical empty versions
+ // of each ordered structure, and should not be used afterwards.
+ // Requires that the map has already been set up in the roots table.
+ DCHECK(ReadOnlyRoots(isolate).at(root_index) == kNullAddress);
+
+ Handle<FixedArray> backing_store = isolate->factory()->NewFixedArrayWithMap(
+ Derived::GetMap(ReadOnlyRoots(isolate)), HashTableStartIndex(),
+ allocation);
+ Handle<Derived> table = Handle<Derived>::cast(backing_store);
+ table->SetNumberOfBuckets(0);
+ table->SetNumberOfElements(0);
+ table->SetNumberOfDeletedElements(0);
+ return table;
+}
+
+template <class Derived, int entrysize>
MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::EnsureGrowable(
Isolate* isolate, Handle<Derived> table) {
DCHECK(!table->IsObsolete());
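As a rough sketch of the intended call site for AllocateEmpty above, assuming it runs while the read-only roots are being set up (the surrounding code is hypothetical; only the OrderedHashSet::AllocateEmpty wrapper declared later in this patch is taken as given):

    // Creates the canonical empty OrderedHashSet exactly once, before the
    // corresponding roots-table slot is populated (see the DCHECK above).
    Handle<OrderedHashSet> empty_set =
        OrderedHashSet::AllocateEmpty(isolate, AllocationType::kReadOnly)
            .ToHandleChecked();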
@@ -50,11 +70,21 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::EnsureGrowable(
int nod = table->NumberOfDeletedElements();
int capacity = table->Capacity();
if ((nof + nod) < capacity) return table;
- // Don't need to grow if we can simply clear out deleted entries instead.
- // Note that we can't compact in place, though, so we always allocate
- // a new table.
- return Derived::Rehash(isolate, table,
- (nod < (capacity >> 1)) ? capacity << 1 : capacity);
+
+ int new_capacity;
+ if (capacity == 0) {
+ // Step from empty to the minimum proper size.
+ new_capacity = kInitialCapacity;
+ } else if (nod >= (capacity >> 1)) {
+ // Don't need to grow if we can simply clear out deleted entries instead.
+ // Note that we can't compact in place, though, so we always allocate
+ // a new table.
+ new_capacity = capacity;
+ } else {
+ new_capacity = capacity << 1;
+ }
+
+ return Derived::Rehash(isolate, table, new_capacity);
}
template <class Derived, int entrysize>
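To make the new growth policy above concrete, a small worked table of its three branches (the numbers are illustrative; kInitialCapacity is 4, as declared in ordered-hash-table.h below):

    // capacity == 0                   -> new_capacity = kInitialCapacity (4)
    // capacity == 8, nod == 5 (>= 4)  -> new_capacity = 8  (rehash only
    //                                    compacts out the deleted entries)
    // capacity == 8, nod == 1         -> new_capacity = 16 (capacity doubles)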
@@ -78,10 +108,13 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Clear(
: AllocationType::kOld;
Handle<Derived> new_table =
- Allocate(isolate, kMinCapacity, allocation_type).ToHandleChecked();
+ Allocate(isolate, kInitialCapacity, allocation_type).ToHandleChecked();
- table->SetNextTable(*new_table);
- table->SetNumberOfDeletedElements(kClearedTableSentinel);
+ if (table->NumberOfBuckets() > 0) {
+ // Don't try to modify the empty canonical table, which lives in RO space.
+ table->SetNextTable(*new_table);
+ table->SetNumberOfDeletedElements(kClearedTableSentinel);
+ }
return new_table;
}
@@ -92,48 +125,56 @@ bool OrderedHashTable<Derived, entrysize>::HasKey(Isolate* isolate,
DCHECK_IMPLIES(entrysize == 1, table.IsOrderedHashSet());
DCHECK_IMPLIES(entrysize == 2, table.IsOrderedHashMap());
DisallowHeapAllocation no_gc;
- int entry = table.FindEntry(isolate, key);
- return entry != kNotFound;
+ InternalIndex entry = table.FindEntry(isolate, key);
+ return entry.is_found();
}
template <class Derived, int entrysize>
-int OrderedHashTable<Derived, entrysize>::FindEntry(Isolate* isolate,
- Object key) {
- int entry;
+InternalIndex OrderedHashTable<Derived, entrysize>::FindEntry(Isolate* isolate,
+ Object key) {
+ if (NumberOfElements() == 0) {
+ // This is not just an optimization but also ensures that we do the right
+ // thing if Capacity() == 0.
+ return InternalIndex::NotFound();
+ }
+
+ int raw_entry;
// This special cases for Smi, so that we avoid the HandleScope
// creation below.
if (key.IsSmi()) {
uint32_t hash = ComputeUnseededHash(Smi::ToInt(key));
- entry = HashToEntry(hash & Smi::kMaxValue);
+ raw_entry = HashToEntryRaw(hash & Smi::kMaxValue);
} else {
HandleScope scope(isolate);
Object hash = key.GetHash();
// If the object does not have an identity hash, it was never used as a key
- if (hash.IsUndefined(isolate)) return kNotFound;
- entry = HashToEntry(Smi::ToInt(hash));
+ if (hash.IsUndefined(isolate)) return InternalIndex::NotFound();
+ raw_entry = HashToEntryRaw(Smi::ToInt(hash));
}
// Walk the chain in the bucket to find the key.
- while (entry != kNotFound) {
- Object candidate_key = KeyAt(entry);
- if (candidate_key.SameValueZero(key)) break;
- entry = NextChainEntry(entry);
+ while (raw_entry != kNotFound) {
+ Object candidate_key = KeyAt(InternalIndex(raw_entry));
+ if (candidate_key.SameValueZero(key)) return InternalIndex(raw_entry);
+ raw_entry = NextChainEntryRaw(raw_entry);
}
- return entry;
+ return InternalIndex::NotFound();
}
MaybeHandle<OrderedHashSet> OrderedHashSet::Add(Isolate* isolate,
Handle<OrderedHashSet> table,
Handle<Object> key) {
int hash = key->GetOrCreateHash(isolate).value();
- int entry = table->HashToEntry(hash);
- // Walk the chain of the bucket and try finding the key.
- while (entry != kNotFound) {
- Object candidate_key = table->KeyAt(entry);
- // Do not add if we have the key already
- if (candidate_key.SameValueZero(*key)) return table;
- entry = table->NextChainEntry(entry);
+ if (table->NumberOfElements() > 0) {
+ int raw_entry = table->HashToEntryRaw(hash);
+ // Walk the chain of the bucket and try finding the key.
+ while (raw_entry != kNotFound) {
+ Object candidate_key = table->KeyAt(InternalIndex(raw_entry));
+ // Do not add if we have the key already
+ if (candidate_key.SameValueZero(*key)) return table;
+ raw_entry = table->NextChainEntryRaw(raw_entry);
+ }
}
MaybeHandle<OrderedHashSet> table_candidate =
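A minimal sketch of the lookup pattern now that FindEntry returns an InternalIndex instead of a raw int, mirroring the HasKey and Delete hunks; `set`, `isolate`, and `key` are assumed to be in scope:

    InternalIndex entry = set.FindEntry(isolate, key);
    if (entry.is_found()) {
      // FindEntry only returns live slots, so KeyAt is safe here.
      Object stored_key = set.KeyAt(entry);
      // ... use |stored_key| ...
    }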
@@ -143,11 +184,11 @@ MaybeHandle<OrderedHashSet> OrderedHashSet::Add(Isolate* isolate,
}
// Read the existing bucket values.
int bucket = table->HashToBucket(hash);
- int previous_entry = table->HashToEntry(hash);
+ int previous_entry = table->HashToEntryRaw(hash);
int nof = table->NumberOfElements();
// Insert a new entry at the end,
int new_entry = nof + table->NumberOfDeletedElements();
- int new_index = table->EntryToIndex(new_entry);
+ int new_index = table->EntryToIndexRaw(new_entry);
table->set(new_index, *key);
table->set(new_index + kChainOffset, Smi::FromInt(previous_entry));
// and point the bucket to the new entry.
@@ -214,17 +255,17 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
if (!new_table_candidate.ToHandle(&new_table)) {
return new_table_candidate;
}
- int nof = table->NumberOfElements();
- int nod = table->NumberOfDeletedElements();
int new_buckets = new_table->NumberOfBuckets();
int new_entry = 0;
int removed_holes_index = 0;
DisallowHeapAllocation no_gc;
- for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
+
+ for (InternalIndex old_entry : table->IterateEntries()) {
+ int old_entry_raw = old_entry.as_int();
Object key = table->KeyAt(old_entry);
if (key.IsTheHole(isolate)) {
- table->SetRemovedIndexAt(removed_holes_index++, old_entry);
+ table->SetRemovedIndexAt(removed_holes_index++, old_entry_raw);
continue;
}
@@ -232,8 +273,8 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
int bucket = Smi::ToInt(hash) & (new_buckets - 1);
Object chain_entry = new_table->get(HashTableStartIndex() + bucket);
new_table->set(HashTableStartIndex() + bucket, Smi::FromInt(new_entry));
- int new_index = new_table->EntryToIndex(new_entry);
- int old_index = table->EntryToIndex(old_entry);
+ int new_index = new_table->EntryToIndexRaw(new_entry);
+ int old_index = table->EntryToIndexRaw(old_entry_raw);
for (int i = 0; i < entrysize; ++i) {
Object value = table->get(old_index + i);
new_table->set(new_index + i, value);
@@ -242,10 +283,13 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
++new_entry;
}
- DCHECK_EQ(nod, removed_holes_index);
+ DCHECK_EQ(table->NumberOfDeletedElements(), removed_holes_index);
- new_table->SetNumberOfElements(nof);
- table->SetNextTable(*new_table);
+ new_table->SetNumberOfElements(table->NumberOfElements());
+ if (table->NumberOfBuckets() > 0) {
+ // Don't try to modify the empty canonical table, which lives in RO space.
+ table->SetNextTable(*new_table);
+ }
return new_table_candidate;
}
@@ -253,36 +297,29 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
MaybeHandle<OrderedHashSet> OrderedHashSet::Rehash(Isolate* isolate,
Handle<OrderedHashSet> table,
int new_capacity) {
- return OrderedHashTable<OrderedHashSet, 1>::Rehash(isolate, table,
- new_capacity);
+ return Base::Rehash(isolate, table, new_capacity);
}
MaybeHandle<OrderedHashSet> OrderedHashSet::Rehash(
Isolate* isolate, Handle<OrderedHashSet> table) {
- return OrderedHashTable<
- OrderedHashSet, OrderedHashSet::kEntrySizeWithoutChain>::Rehash(isolate,
- table);
+ return Base::Rehash(isolate, table);
}
MaybeHandle<OrderedHashMap> OrderedHashMap::Rehash(
Isolate* isolate, Handle<OrderedHashMap> table) {
- return OrderedHashTable<
- OrderedHashMap, OrderedHashMap::kEntrySizeWithoutChain>::Rehash(isolate,
- table);
+ return Base::Rehash(isolate, table);
}
MaybeHandle<OrderedHashMap> OrderedHashMap::Rehash(Isolate* isolate,
Handle<OrderedHashMap> table,
int new_capacity) {
- return OrderedHashTable<OrderedHashMap, 2>::Rehash(isolate, table,
- new_capacity);
+ return Base::Rehash(isolate, table, new_capacity);
}
MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Rehash(
Isolate* isolate, Handle<OrderedNameDictionary> table, int new_capacity) {
MaybeHandle<OrderedNameDictionary> new_table_candidate =
- OrderedHashTable<OrderedNameDictionary, 3>::Rehash(isolate, table,
- new_capacity);
+ Base::Rehash(isolate, table, new_capacity);
Handle<OrderedNameDictionary> new_table;
if (new_table_candidate.ToHandle(&new_table)) {
new_table->SetHash(table->Hash());
@@ -294,8 +331,8 @@ template <class Derived, int entrysize>
bool OrderedHashTable<Derived, entrysize>::Delete(Isolate* isolate,
Derived table, Object key) {
DisallowHeapAllocation no_gc;
- int entry = table.FindEntry(isolate, key);
- if (entry == kNotFound) return false;
+ InternalIndex entry = table.FindEntry(isolate, key);
+ if (entry.is_not_found()) return false;
int nof = table.NumberOfElements();
int nod = table.NumberOfDeletedElements();
@@ -312,6 +349,17 @@ bool OrderedHashTable<Derived, entrysize>::Delete(Isolate* isolate,
return true;
}
+// Parameter |roots| is only here for compatibility with HashTable<...>::ToKey.
+template <class Derived, int entrysize>
+bool OrderedHashTable<Derived, entrysize>::ToKey(ReadOnlyRoots roots,
+ InternalIndex entry,
+ Object* out_key) {
+ Object k = KeyAt(entry);
+ if (!IsKey(roots, k)) return false;
+ *out_key = k;
+ return true;
+}
+
Address OrderedHashMap::GetHash(Isolate* isolate, Address raw_key) {
DisallowHeapAllocation no_gc;
Object key(raw_key);
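The ToKey helper above enables the same iteration idiom used elsewhere in this patch (for example in SlowReverseLookup below); a minimal sketch, assuming `table` is any OrderedHashTable instance:

    ReadOnlyRoots roots = table.GetReadOnlyRoots();
    for (InternalIndex i : table.IterateEntries()) {
      Object key;
      if (!table.ToKey(roots, i, &key)) continue;  // Skips deleted (hole) slots.
      // ... use |key| ...
    }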
@@ -328,16 +376,18 @@ MaybeHandle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
Handle<Object> key,
Handle<Object> value) {
int hash = key->GetOrCreateHash(isolate).value();
- int entry = table->HashToEntry(hash);
- // Walk the chain of the bucket and try finding the key.
- {
- DisallowHeapAllocation no_gc;
- Object raw_key = *key;
- while (entry != kNotFound) {
- Object candidate_key = table->KeyAt(entry);
- // Do not add if we have the key already
- if (candidate_key.SameValueZero(raw_key)) return table;
- entry = table->NextChainEntry(entry);
+ if (table->NumberOfElements() > 0) {
+ int raw_entry = table->HashToEntryRaw(hash);
+ // Walk the chain of the bucket and try finding the key.
+ {
+ DisallowHeapAllocation no_gc;
+ Object raw_key = *key;
+ while (raw_entry != kNotFound) {
+ Object candidate_key = table->KeyAt(InternalIndex(raw_entry));
+ // Do not add if we have the key already
+ if (candidate_key.SameValueZero(raw_key)) return table;
+ raw_entry = table->NextChainEntryRaw(raw_entry);
+ }
}
}
@@ -348,11 +398,11 @@ MaybeHandle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
}
// Read the existing bucket values.
int bucket = table->HashToBucket(hash);
- int previous_entry = table->HashToEntry(hash);
+ int previous_entry = table->HashToEntryRaw(hash);
int nof = table->NumberOfElements();
// Insert a new entry at the end,
int new_entry = nof + table->NumberOfDeletedElements();
- int new_index = table->EntryToIndex(new_entry);
+ int new_index = table->EntryToIndexRaw(new_entry);
table->set(new_index, *key);
table->set(new_index + kValueOffset, *value);
table->set(new_index + kChainOffset, Smi::FromInt(previous_entry));
@@ -362,16 +412,21 @@ MaybeHandle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
return table;
}
-template <>
-V8_EXPORT_PRIVATE int OrderedHashTable<OrderedNameDictionary, 3>::FindEntry(
- Isolate* isolate, Object key) {
+InternalIndex OrderedNameDictionary::FindEntry(Isolate* isolate, Object key) {
DisallowHeapAllocation no_gc;
DCHECK(key.IsUniqueName());
Name raw_key = Name::cast(key);
- int entry = HashToEntry(raw_key.Hash());
- while (entry != kNotFound) {
+ if (NumberOfElements() == 0) {
+ // This is not just an optimization but also ensures that we do the right
+ // thing if Capacity() == 0.
+ return InternalIndex::NotFound();
+ }
+
+ int raw_entry = HashToEntryRaw(raw_key.Hash());
+ while (raw_entry != kNotFound) {
+ InternalIndex entry(raw_entry);
Object candidate_key = KeyAt(entry);
DCHECK(candidate_key.IsTheHole() ||
Name::cast(candidate_key).IsUniqueName());
@@ -380,16 +435,48 @@ V8_EXPORT_PRIVATE int OrderedHashTable<OrderedNameDictionary, 3>::FindEntry(
// TODO(gsathya): This is loading the bucket count from the hash
// table for every iteration. This should be peeled out of the
// loop.
- entry = NextChainEntry(entry);
+ raw_entry = NextChainEntryRaw(raw_entry);
}
- return kNotFound;
+ return InternalIndex::NotFound();
+}
+
+// TODO(emrich): This is almost an identical copy of
+// Dictionary<..>::SlowReverseLookup.
+// Consolidate both versions elsewhere (e.g., hash-table-utils)?
+Object OrderedNameDictionary::SlowReverseLookup(Isolate* isolate,
+ Object value) {
+ ReadOnlyRoots roots(isolate);
+ for (InternalIndex i : IterateEntries()) {
+ Object k;
+ if (!ToKey(roots, i, &k)) continue;
+ Object e = this->ValueAt(i);
+ if (e == value) return k;
+ }
+ return roots.undefined_value();
+}
+
+// TODO(emrich): This is almost an identical copy of
+// HashTable<..>::NumberOfEnumerableProperties.
+// Consolidate both versions elsewhere (e.g., hash-table-utils)?
+int OrderedNameDictionary::NumberOfEnumerableProperties() {
+ ReadOnlyRoots roots = this->GetReadOnlyRoots();
+ int result = 0;
+ for (InternalIndex i : this->IterateEntries()) {
+ Object k;
+ if (!this->ToKey(roots, i, &k)) continue;
+ if (k.FilterKey(ENUMERABLE_STRINGS)) continue;
+ PropertyDetails details = this->DetailsAt(i);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & ONLY_ENUMERABLE) == 0) result++;
+ }
+ return result;
}
MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Add(
Isolate* isolate, Handle<OrderedNameDictionary> table, Handle<Name> key,
Handle<Object> value, PropertyDetails details) {
- DCHECK_EQ(kNotFound, table->FindEntry(isolate, *key));
+ DCHECK(table->FindEntry(isolate, *key).is_not_found());
MaybeHandle<OrderedNameDictionary> table_candidate =
OrderedNameDictionary::EnsureGrowable(isolate, table);
@@ -399,11 +486,11 @@ MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Add(
// Read the existing bucket values.
int hash = key->Hash();
int bucket = table->HashToBucket(hash);
- int previous_entry = table->HashToEntry(hash);
+ int previous_entry = table->HashToEntryRaw(hash);
int nof = table->NumberOfElements();
// Insert a new entry at the end,
int new_entry = nof + table->NumberOfDeletedElements();
- int new_index = table->EntryToIndex(new_entry);
+ int new_index = table->EntryToIndexRaw(new_entry);
table->set(new_index, *key);
table->set(new_index + kValueOffset, *value);
@@ -419,8 +506,8 @@ MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Add(
return table;
}
-void OrderedNameDictionary::SetEntry(int entry, Object key, Object value,
- PropertyDetails details) {
+void OrderedNameDictionary::SetEntry(InternalIndex entry, Object key,
+ Object value, PropertyDetails details) {
DisallowHeapAllocation gc;
DCHECK_IMPLIES(!key.IsName(), key.IsTheHole());
DisallowHeapAllocation no_gc;
@@ -435,8 +522,9 @@ void OrderedNameDictionary::SetEntry(int entry, Object key, Object value,
}
Handle<OrderedNameDictionary> OrderedNameDictionary::DeleteEntry(
- Isolate* isolate, Handle<OrderedNameDictionary> table, int entry) {
- DCHECK_NE(entry, kNotFound);
+ Isolate* isolate, Handle<OrderedNameDictionary> table,
+ InternalIndex entry) {
+ DCHECK(entry.is_found());
Object hole = ReadOnlyRoots(isolate).the_hole_value();
PropertyDetails details = PropertyDetails::Empty();
@@ -452,25 +540,47 @@ Handle<OrderedNameDictionary> OrderedNameDictionary::DeleteEntry(
MaybeHandle<OrderedHashSet> OrderedHashSet::Allocate(
Isolate* isolate, int capacity, AllocationType allocation) {
- return OrderedHashTable<OrderedHashSet, 1>::Allocate(isolate, capacity,
- allocation);
+ return Base::Allocate(isolate, capacity, allocation);
}
MaybeHandle<OrderedHashMap> OrderedHashMap::Allocate(
Isolate* isolate, int capacity, AllocationType allocation) {
- return OrderedHashTable<OrderedHashMap, 2>::Allocate(isolate, capacity,
- allocation);
+ return Base::Allocate(isolate, capacity, allocation);
}
MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Allocate(
Isolate* isolate, int capacity, AllocationType allocation) {
MaybeHandle<OrderedNameDictionary> table_candidate =
- OrderedHashTable<OrderedNameDictionary, 3>::Allocate(isolate, capacity,
- allocation);
+ Base::Allocate(isolate, capacity, allocation);
+ Handle<OrderedNameDictionary> table;
+ if (table_candidate.ToHandle(&table)) {
+ table->SetHash(PropertyArray::kNoHashSentinel);
+ }
+ return table_candidate;
+}
+
+MaybeHandle<OrderedHashSet> OrderedHashSet::AllocateEmpty(
+ Isolate* isolate, AllocationType allocation) {
+ RootIndex ri = RootIndex::kEmptyOrderedHashSet;
+ return Base::AllocateEmpty(isolate, allocation, ri);
+}
+
+MaybeHandle<OrderedHashMap> OrderedHashMap::AllocateEmpty(
+ Isolate* isolate, AllocationType allocation) {
+ RootIndex ri = RootIndex::kEmptyOrderedHashMap;
+ return Base::AllocateEmpty(isolate, allocation, ri);
+}
+
+MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::AllocateEmpty(
+ Isolate* isolate, AllocationType allocation) {
+ RootIndex ri = RootIndex::kEmptyOrderedPropertyDictionary;
+ MaybeHandle<OrderedNameDictionary> table_candidate =
+ Base::AllocateEmpty(isolate, allocation, ri);
Handle<OrderedNameDictionary> table;
if (table_candidate.ToHandle(&table)) {
table->SetHash(PropertyArray::kNoHashSentinel);
}
+
return table_candidate;
}
@@ -492,8 +602,8 @@ template V8_EXPORT_PRIVATE bool OrderedHashTable<OrderedHashSet, 1>::HasKey(
template V8_EXPORT_PRIVATE bool OrderedHashTable<OrderedHashSet, 1>::Delete(
Isolate* isolate, OrderedHashSet table, Object key);
-template V8_EXPORT_PRIVATE int OrderedHashTable<OrderedHashSet, 1>::FindEntry(
- Isolate* isolate, Object key);
+template V8_EXPORT_PRIVATE InternalIndex
+OrderedHashTable<OrderedHashSet, 1>::FindEntry(Isolate* isolate, Object key);
template V8_EXPORT_PRIVATE MaybeHandle<OrderedHashMap>
OrderedHashTable<OrderedHashMap, 2>::EnsureGrowable(
@@ -513,10 +623,10 @@ template V8_EXPORT_PRIVATE bool OrderedHashTable<OrderedHashMap, 2>::HasKey(
template V8_EXPORT_PRIVATE bool OrderedHashTable<OrderedHashMap, 2>::Delete(
Isolate* isolate, OrderedHashMap table, Object key);
-template V8_EXPORT_PRIVATE int OrderedHashTable<OrderedHashMap, 2>::FindEntry(
- Isolate* isolate, Object key);
+template V8_EXPORT_PRIVATE InternalIndex
+OrderedHashTable<OrderedHashMap, 2>::FindEntry(Isolate* isolate, Object key);
-template Handle<OrderedNameDictionary>
+template V8_EXPORT_PRIVATE Handle<OrderedNameDictionary>
OrderedHashTable<OrderedNameDictionary, 3>::Shrink(
Isolate* isolate, Handle<OrderedNameDictionary> table);
@@ -679,29 +789,30 @@ bool SmallOrderedHashMap::HasKey(Isolate* isolate, Handle<Object> key) {
}
template <>
-int V8_EXPORT_PRIVATE
+InternalIndex V8_EXPORT_PRIVATE
SmallOrderedHashTable<SmallOrderedNameDictionary>::FindEntry(Isolate* isolate,
Object key) {
DisallowHeapAllocation no_gc;
DCHECK(key.IsUniqueName());
Name raw_key = Name::cast(key);
- int entry = HashToFirstEntry(raw_key.Hash());
+ int raw_entry = HashToFirstEntry(raw_key.Hash());
// Walk the chain in the bucket to find the key.
- while (entry != kNotFound) {
+ while (raw_entry != kNotFound) {
+ InternalIndex entry(raw_entry);
Object candidate_key = KeyAt(entry);
if (candidate_key == key) return entry;
- entry = GetNextEntry(entry);
+ raw_entry = GetNextEntry(raw_entry);
}
- return kNotFound;
+ return InternalIndex::NotFound();
}
MaybeHandle<SmallOrderedNameDictionary> SmallOrderedNameDictionary::Add(
Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
Handle<Name> key, Handle<Object> value, PropertyDetails details) {
- DCHECK_EQ(kNotFound, table->FindEntry(isolate, *key));
+ DCHECK(table->FindEntry(isolate, *key).is_not_found());
if (table->UsedCapacity() >= table->Capacity()) {
MaybeHandle<SmallOrderedNameDictionary> new_table =
@@ -739,15 +850,17 @@ MaybeHandle<SmallOrderedNameDictionary> SmallOrderedNameDictionary::Add(
return table;
}
-void SmallOrderedNameDictionary::SetEntry(int entry, Object key, Object value,
+void SmallOrderedNameDictionary::SetEntry(InternalIndex entry, Object key,
+ Object value,
PropertyDetails details) {
+ int raw_entry = entry.as_int();
DCHECK_IMPLIES(!key.IsName(), key.IsTheHole());
- SetDataEntry(entry, SmallOrderedNameDictionary::kValueIndex, value);
- SetDataEntry(entry, SmallOrderedNameDictionary::kKeyIndex, key);
+ SetDataEntry(raw_entry, SmallOrderedNameDictionary::kValueIndex, value);
+ SetDataEntry(raw_entry, SmallOrderedNameDictionary::kKeyIndex, key);
// TODO(gsathya): PropertyDetails should be stored as part of the
// data table to save more memory.
- SetDataEntry(entry, SmallOrderedNameDictionary::kPropertyDetailsIndex,
+ SetDataEntry(raw_entry, SmallOrderedNameDictionary::kPropertyDetailsIndex,
details.AsSmi());
}
@@ -755,22 +868,22 @@ template <class Derived>
bool SmallOrderedHashTable<Derived>::HasKey(Isolate* isolate,
Handle<Object> key) {
DisallowHeapAllocation no_gc;
- return FindEntry(isolate, *key) != kNotFound;
+ return FindEntry(isolate, *key).is_found();
}
template <class Derived>
bool SmallOrderedHashTable<Derived>::Delete(Isolate* isolate, Derived table,
Object key) {
DisallowHeapAllocation no_gc;
- int entry = table.FindEntry(isolate, key);
- if (entry == kNotFound) return false;
+ InternalIndex entry = table.FindEntry(isolate, key);
+ if (entry.is_not_found()) return false;
int nof = table.NumberOfElements();
int nod = table.NumberOfDeletedElements();
Object hole = ReadOnlyRoots(isolate).the_hole_value();
for (int j = 0; j < Derived::kEntrySize; j++) {
- table.SetDataEntry(entry, j, hole);
+ table.SetDataEntry(entry.as_int(), j, hole);
}
table.SetNumberOfElements(nof - 1);
@@ -780,8 +893,9 @@ bool SmallOrderedHashTable<Derived>::Delete(Isolate* isolate, Derived table,
}
Handle<SmallOrderedNameDictionary> SmallOrderedNameDictionary::DeleteEntry(
- Isolate* isolate, Handle<SmallOrderedNameDictionary> table, int entry) {
- DCHECK_NE(entry, kNotFound);
+ Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
+ InternalIndex entry) {
+ DCHECK(entry.is_found());
{
DisallowHeapAllocation no_gc;
Object hole = ReadOnlyRoots(isolate).the_hole_value();
@@ -806,13 +920,11 @@ Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Isolate* isolate,
isolate, new_capacity,
Heap::InYoungGeneration(*table) ? AllocationType::kYoung
: AllocationType::kOld);
- int nof = table->NumberOfElements();
- int nod = table->NumberOfDeletedElements();
int new_entry = 0;
{
DisallowHeapAllocation no_gc;
- for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
+ for (InternalIndex old_entry : table->IterateEntries()) {
Object key = table->KeyAt(old_entry);
if (key.IsTheHole(isolate)) continue;
@@ -824,14 +936,14 @@ Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Isolate* isolate,
new_table->SetNextEntry(new_entry, chain);
for (int i = 0; i < Derived::kEntrySize; ++i) {
- Object value = table->GetDataEntry(old_entry, i);
+ Object value = table->GetDataEntry(old_entry.as_int(), i);
new_table->SetDataEntry(new_entry, i, value);
}
++new_entry;
}
- new_table->SetNumberOfElements(nof);
+ new_table->SetNumberOfElements(table->NumberOfElements());
}
return new_table;
}
@@ -895,20 +1007,22 @@ MaybeHandle<Derived> SmallOrderedHashTable<Derived>::Grow(
}
template <class Derived>
-int SmallOrderedHashTable<Derived>::FindEntry(Isolate* isolate, Object key) {
+InternalIndex SmallOrderedHashTable<Derived>::FindEntry(Isolate* isolate,
+ Object key) {
DisallowHeapAllocation no_gc;
Object hash = key.GetHash();
- if (hash.IsUndefined(isolate)) return kNotFound;
- int entry = HashToFirstEntry(Smi::ToInt(hash));
+ if (hash.IsUndefined(isolate)) return InternalIndex::NotFound();
+ int raw_entry = HashToFirstEntry(Smi::ToInt(hash));
// Walk the chain in the bucket to find the key.
- while (entry != kNotFound) {
+ while (raw_entry != kNotFound) {
+ InternalIndex entry(raw_entry);
Object candidate_key = KeyAt(entry);
if (candidate_key.SameValueZero(key)) return entry;
- entry = GetNextEntry(entry);
+ raw_entry = GetNextEntry(raw_entry);
}
- return kNotFound;
+ return InternalIndex::NotFound();
}
template bool EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
@@ -1031,17 +1145,16 @@ MaybeHandle<OrderedHashMap> OrderedHashMapHandler::AdjustRepresentation(
if (!new_table_candidate.ToHandle(&new_table)) {
return new_table_candidate;
}
- int nof = table->NumberOfElements();
- int nod = table->NumberOfDeletedElements();
// TODO(gsathya): Optimize the lookup to not re calc offsets. Also,
// unhandlify this code as we preallocate the new backing store with
// the proper capacity.
- for (int entry = 0; entry < (nof + nod); ++entry) {
+ for (InternalIndex entry : table->IterateEntries()) {
Handle<Object> key = handle(table->KeyAt(entry), isolate);
if (key->IsTheHole(isolate)) continue;
Handle<Object> value = handle(
- table->GetDataEntry(entry, SmallOrderedHashMap::kValueIndex), isolate);
+ table->GetDataEntry(entry.as_int(), SmallOrderedHashMap::kValueIndex),
+ isolate);
new_table_candidate = OrderedHashMap::Add(isolate, new_table, key, value);
if (!new_table_candidate.ToHandle(&new_table)) {
return new_table_candidate;
@@ -1059,13 +1172,11 @@ MaybeHandle<OrderedHashSet> OrderedHashSetHandler::AdjustRepresentation(
if (!new_table_candidate.ToHandle(&new_table)) {
return new_table_candidate;
}
- int nof = table->NumberOfElements();
- int nod = table->NumberOfDeletedElements();
// TODO(gsathya): Optimize the lookup to not re calc offsets. Also,
// unhandlify this code as we preallocate the new backing store with
// the proper capacity.
- for (int entry = 0; entry < (nof + nod); ++entry) {
+ for (InternalIndex entry : table->IterateEntries()) {
Handle<Object> key = handle(table->KeyAt(entry), isolate);
if (key->IsTheHole(isolate)) continue;
new_table_candidate = OrderedHashSet::Add(isolate, new_table, key);
@@ -1086,13 +1197,11 @@ OrderedNameDictionaryHandler::AdjustRepresentation(
if (!new_table_candidate.ToHandle(&new_table)) {
return new_table_candidate;
}
- int nof = table->NumberOfElements();
- int nod = table->NumberOfDeletedElements();
// TODO(gsathya): Optimize the lookup to not re calc offsets. Also,
// unhandlify this code as we preallocate the new backing store with
// the proper capacity.
- for (int entry = 0; entry < (nof + nod); ++entry) {
+ for (InternalIndex entry : table->IterateEntries()) {
Handle<Name> key(Name::cast(table->KeyAt(entry)), isolate);
if (key->IsTheHole(isolate)) continue;
Handle<Object> value(table->ValueAt(entry), isolate);
@@ -1180,8 +1289,9 @@ MaybeHandle<HeapObject> OrderedNameDictionaryHandler::Add(
isolate, Handle<OrderedNameDictionary>::cast(table), key, value, details);
}
-void OrderedNameDictionaryHandler::SetEntry(HeapObject table, int entry,
- Object key, Object value,
+void OrderedNameDictionaryHandler::SetEntry(HeapObject table,
+ InternalIndex entry, Object key,
+ Object value,
PropertyDetails details) {
DisallowHeapAllocation no_gc;
if (table.IsSmallOrderedNameDictionary()) {
@@ -1190,28 +1300,24 @@ void OrderedNameDictionaryHandler::SetEntry(HeapObject table, int entry,
}
DCHECK(table.IsOrderedNameDictionary());
- return OrderedNameDictionary::cast(table).SetEntry(entry, key, value,
- details);
+ return OrderedNameDictionary::cast(table).SetEntry(InternalIndex(entry), key,
+ value, details);
}
-int OrderedNameDictionaryHandler::FindEntry(Isolate* isolate, HeapObject table,
- Name key) {
+InternalIndex OrderedNameDictionaryHandler::FindEntry(Isolate* isolate,
+ HeapObject table,
+ Name key) {
DisallowHeapAllocation no_gc;
if (table.IsSmallOrderedNameDictionary()) {
- int entry = SmallOrderedNameDictionary::cast(table).FindEntry(isolate, key);
- return entry == SmallOrderedNameDictionary::kNotFound
- ? OrderedNameDictionaryHandler::kNotFound
- : entry;
+ return SmallOrderedNameDictionary::cast(table).FindEntry(isolate, key);
}
DCHECK(table.IsOrderedNameDictionary());
- int entry = OrderedNameDictionary::cast(table).FindEntry(isolate, key);
- return entry == OrderedNameDictionary::kNotFound
- ? OrderedNameDictionaryHandler::kNotFound
- : entry;
+ return OrderedNameDictionary::cast(table).FindEntry(isolate, key);
}
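A hedged sketch of how a caller consumes the handler's InternalIndex result after this change, assuming a `HeapObject dictionary` (either representation) and a `Name name`; the variable names are placeholders:

    InternalIndex entry =
        OrderedNameDictionaryHandler::FindEntry(isolate, dictionary, name);
    if (entry.is_found()) {
      Object value = OrderedNameDictionaryHandler::ValueAt(dictionary, entry);
      // ... use |value| ...
    }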
-Object OrderedNameDictionaryHandler::ValueAt(HeapObject table, int entry) {
+Object OrderedNameDictionaryHandler::ValueAt(HeapObject table,
+ InternalIndex entry) {
if (table.IsSmallOrderedNameDictionary()) {
return SmallOrderedNameDictionary::cast(table).ValueAt(entry);
}
@@ -1220,7 +1326,8 @@ Object OrderedNameDictionaryHandler::ValueAt(HeapObject table, int entry) {
return OrderedNameDictionary::cast(table).ValueAt(entry);
}
-void OrderedNameDictionaryHandler::ValueAtPut(HeapObject table, int entry,
+void OrderedNameDictionaryHandler::ValueAtPut(HeapObject table,
+ InternalIndex entry,
Object value) {
if (table.IsSmallOrderedNameDictionary()) {
return SmallOrderedNameDictionary::cast(table).ValueAtPut(entry, value);
@@ -1231,7 +1338,7 @@ void OrderedNameDictionaryHandler::ValueAtPut(HeapObject table, int entry,
}
PropertyDetails OrderedNameDictionaryHandler::DetailsAt(HeapObject table,
- int entry) {
+ InternalIndex entry) {
if (table.IsSmallOrderedNameDictionary()) {
return SmallOrderedNameDictionary::cast(table).DetailsAt(entry);
}
@@ -1240,7 +1347,8 @@ PropertyDetails OrderedNameDictionaryHandler::DetailsAt(HeapObject table,
return OrderedNameDictionary::cast(table).DetailsAt(entry);
}
-void OrderedNameDictionaryHandler::DetailsAtPut(HeapObject table, int entry,
+void OrderedNameDictionaryHandler::DetailsAtPut(HeapObject table,
+ InternalIndex entry,
PropertyDetails details) {
if (table.IsSmallOrderedNameDictionary()) {
return SmallOrderedNameDictionary::cast(table).DetailsAtPut(entry, details);
@@ -1268,12 +1376,14 @@ void OrderedNameDictionaryHandler::SetHash(HeapObject table, int hash) {
OrderedNameDictionary::cast(table).SetHash(hash);
}
-Name OrderedNameDictionaryHandler::KeyAt(HeapObject table, int entry) {
+Name OrderedNameDictionaryHandler::KeyAt(HeapObject table,
+ InternalIndex entry) {
if (table.IsSmallOrderedNameDictionary()) {
return Name::cast(SmallOrderedNameDictionary::cast(table).KeyAt(entry));
}
- return Name::cast(OrderedNameDictionary::cast(table).KeyAt(entry));
+ return Name::cast(
+ OrderedNameDictionary::cast(table).KeyAt(InternalIndex(entry)));
}
int OrderedNameDictionaryHandler::NumberOfElements(HeapObject table) {
@@ -1306,7 +1416,7 @@ Handle<HeapObject> OrderedNameDictionaryHandler::Shrink(
}
Handle<HeapObject> OrderedNameDictionaryHandler::DeleteEntry(
- Isolate* isolate, Handle<HeapObject> table, int entry) {
+ Isolate* isolate, Handle<HeapObject> table, InternalIndex entry) {
DisallowHeapAllocation no_gc;
if (table->IsSmallOrderedNameDictionary()) {
Handle<SmallOrderedNameDictionary> small_dict =
@@ -1316,7 +1426,8 @@ Handle<HeapObject> OrderedNameDictionaryHandler::DeleteEntry(
Handle<OrderedNameDictionary> large_dict =
Handle<OrderedNameDictionary>::cast(table);
- return OrderedNameDictionary::DeleteEntry(isolate, large_dict, entry);
+ return OrderedNameDictionary::DeleteEntry(isolate, large_dict,
+ InternalIndex(entry));
}
template <class Derived, class TableType>
@@ -1326,6 +1437,7 @@ void OrderedHashTableIterator<Derived, TableType>::Transition() {
if (!table.IsObsolete()) return;
int index = Smi::ToInt(this->index());
+ DCHECK_LE(0, index);
while (table.IsObsolete()) {
TableType next_table = table.NextTable();
@@ -1362,7 +1474,8 @@ bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
int index = Smi::ToInt(this->index());
int used_capacity = table.UsedCapacity();
- while (index < used_capacity && table.KeyAt(index).IsTheHole(ro_roots)) {
+ while (index < used_capacity &&
+ table.KeyAt(InternalIndex(index)).IsTheHole(ro_roots)) {
index++;
}
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 5f3c45a110..0172986d1f 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -8,6 +8,7 @@
#include "src/base/export-template.h"
#include "src/common/globals.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/internal-index.h"
#include "src/objects/js-objects.h"
#include "src/objects/smi.h"
#include "src/roots/roots.h"
@@ -34,7 +35,7 @@ namespace internal {
// [kPrefixSize]: element count
// [kPrefixSize + 1]: deleted element count
// [kPrefixSize + 2]: bucket count
-// [kPrefixSize + 3..(3 + NumberOfBuckets() - 1)]: "hash table",
+// [kPrefixSize + 3..(kPrefixSize + 3 + NumberOfBuckets() - 1)]: "hash table",
// where each item is an offset into the
// data table (see below) where the first
// item in this bucket is stored.
@@ -52,13 +53,15 @@ namespace internal {
//
// Memory layout for obsolete table:
// [0] : Prefix
-// [kPrefixSize + 0]: bucket count
-// [kPrefixSize + 1]: Next newer table
-// [kPrefixSize + 2]: Number of removed holes or -1 when the table was
-// cleared.
-// [kPrefixSize + 3..(3 + NumberOfRemovedHoles() - 1)]: The indexes
-// of the removed holes.
-// [kPrefixSize + 3 + NumberOfRemovedHoles()..length]: Not used
+// [kPrefixSize + 0]: Next newer table
+// [kPrefixSize + 1]: deleted element count or kClearedTableSentinel if
+// the table was cleared
+// [kPrefixSize + 2]: bucket count
+// [kPrefixSize + 3..(kPrefixSize + 3 + NumberOfDeletedElements() - 1)]:
+// The indexes of the removed holes. This part is only
+// usable for non-cleared tables, as clearing removes the
+// deleted elements count.
+// [kPrefixSize + 3 + NumberOfDeletedElements()..length]: Not used
template <class Derived, int entrysize>
class OrderedHashTable : public FixedArray {
public:
@@ -78,11 +81,17 @@ class OrderedHashTable : public FixedArray {
// Returns true if the OrderedHashTable contains the key
static bool HasKey(Isolate* isolate, Derived table, Object key);
+ // Returns whether a potential key |k| returned by KeyAt is a real
+ // key (meaning that it is not a hole).
+ static inline bool IsKey(ReadOnlyRoots roots, Object k);
+
// Returns a true value if the OrderedHashTable contains the key and
// the key has been deleted. This does not shrink the table.
static bool Delete(Isolate* isolate, Derived table, Object key);
- int FindEntry(Isolate* isolate, Object key);
+ InternalIndex FindEntry(Isolate* isolate, Object key);
+
+ Object SlowReverseLookup(Isolate* isolate, Object value);
int NumberOfElements() const {
return Smi::ToInt(get(NumberOfElementsIndex()));
@@ -102,30 +111,20 @@ class OrderedHashTable : public FixedArray {
return Smi::ToInt(get(NumberOfBucketsIndex()));
}
- // Returns an index into |this| for the given entry.
- int EntryToIndex(int entry) {
- return HashTableStartIndex() + NumberOfBuckets() + (entry * kEntrySize);
- }
-
- int HashToBucket(int hash) { return hash & (NumberOfBuckets() - 1); }
-
- int HashToEntry(int hash) {
- int bucket = HashToBucket(hash);
- Object entry = this->get(HashTableStartIndex() + bucket);
- return Smi::ToInt(entry);
- }
-
- int NextChainEntry(int entry) {
- Object next_entry = get(EntryToIndex(entry) + kChainOffset);
- return Smi::ToInt(next_entry);
+ InternalIndex::Range IterateEntries() {
+ return InternalIndex::Range(UsedCapacity());
}
- // use KeyAt(i)->IsTheHole(isolate) to determine if this is a deleted entry.
- Object KeyAt(int entry) {
- DCHECK_LT(entry, this->UsedCapacity());
+ // Use IsKey to check if this is a deleted entry.
+ Object KeyAt(InternalIndex entry) {
+ DCHECK_LT(entry.as_int(), this->UsedCapacity());
return get(EntryToIndex(entry));
}
+ // Similar to KeyAt, but indicates whether the given entry is valid
+ // (not a deleted one).
+ bool ToKey(ReadOnlyRoots roots, InternalIndex entry, Object* out_key);
+
bool IsObsolete() { return !get(NextTableIndex()).IsSmi(); }
// The next newer table. This is only valid if the table is obsolete.
@@ -142,7 +141,9 @@ class OrderedHashTable : public FixedArray {
static const int kChainOffset = entrysize;
static const int kNotFound = -1;
- static const int kMinCapacity = 4;
+ // The minimum capacity. Note that despite this value, 0 is also a permitted
+ // capacity, indicating a table without any storage for elements.
+ static const int kInitialCapacity = 4;
static constexpr int PrefixIndex() { return 0; }
@@ -202,10 +203,41 @@ class OrderedHashTable : public FixedArray {
Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
+ static MaybeHandle<Derived> AllocateEmpty(Isolate* isolate,
+ AllocationType allocation,
+ RootIndex root_index);
+
static MaybeHandle<Derived> Rehash(Isolate* isolate, Handle<Derived> table);
static MaybeHandle<Derived> Rehash(Isolate* isolate, Handle<Derived> table,
int new_capacity);
+ int HashToEntryRaw(int hash) {
+ int bucket = HashToBucket(hash);
+ Object entry = this->get(HashTableStartIndex() + bucket);
+ int entry_int = Smi::ToInt(entry);
+ DCHECK(entry_int == kNotFound || entry_int >= 0);
+ return entry_int;
+ }
+
+ int NextChainEntryRaw(int entry) {
+ DCHECK_LT(entry, this->UsedCapacity());
+ Object next_entry = get(EntryToIndexRaw(entry) + kChainOffset);
+ int next_entry_int = Smi::ToInt(next_entry);
+ DCHECK(next_entry_int == kNotFound || next_entry_int >= 0);
+ return next_entry_int;
+ }
+
+ // Returns an index into |this| for the given entry.
+ int EntryToIndexRaw(int entry) {
+ return HashTableStartIndex() + NumberOfBuckets() + (entry * kEntrySize);
+ }
+
+ int EntryToIndex(InternalIndex entry) {
+ return EntryToIndexRaw(entry.as_int());
+ }
+
+ int HashToBucket(int hash) { return hash & (NumberOfBuckets() - 1); }
+
void SetNumberOfBuckets(int num) {
set(NumberOfBucketsIndex(), Smi::FromInt(num));
}
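A worked example of the EntryToIndexRaw arithmetic above, assuming an OrderedHashMap whose per-entry layout is {key, value, chain link} and a table with NumberOfBuckets() == 2:

    // index(entry 0) = HashTableStartIndex() + 2 + 0 * kEntrySize
    // index(entry 1) = HashTableStartIndex() + 2 + 1 * kEntrySize
    // i.e. entries are laid out back to back immediately after the bucket array.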
@@ -235,6 +267,8 @@ class OrderedHashTable : public FixedArray {
class V8_EXPORT_PRIVATE OrderedHashSet
: public OrderedHashTable<OrderedHashSet, 1> {
+ using Base = OrderedHashTable<OrderedHashSet, 1>;
+
public:
DECL_CAST(OrderedHashSet)
@@ -252,6 +286,10 @@ class V8_EXPORT_PRIVATE OrderedHashSet
static MaybeHandle<OrderedHashSet> Allocate(
Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
+
+ static MaybeHandle<OrderedHashSet> AllocateEmpty(
+ Isolate* isolate, AllocationType allocation = AllocationType::kReadOnly);
+
static HeapObject GetEmpty(ReadOnlyRoots ro_roots);
static inline Handle<Map> GetMap(ReadOnlyRoots roots);
static inline bool Is(Handle<HeapObject> table);
@@ -262,6 +300,8 @@ class V8_EXPORT_PRIVATE OrderedHashSet
class V8_EXPORT_PRIVATE OrderedHashMap
: public OrderedHashTable<OrderedHashMap, 2> {
+ using Base = OrderedHashTable<OrderedHashMap, 2>;
+
public:
DECL_CAST(OrderedHashMap)
@@ -275,12 +315,16 @@ class V8_EXPORT_PRIVATE OrderedHashMap
static MaybeHandle<OrderedHashMap> Allocate(
Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
+
+ static MaybeHandle<OrderedHashMap> AllocateEmpty(
+ Isolate* isolate, AllocationType allocation = AllocationType::kReadOnly);
+
static MaybeHandle<OrderedHashMap> Rehash(Isolate* isolate,
Handle<OrderedHashMap> table,
int new_capacity);
static MaybeHandle<OrderedHashMap> Rehash(Isolate* isolate,
Handle<OrderedHashMap> table);
- Object ValueAt(int entry);
+ Object ValueAt(InternalIndex entry);
// This takes and returns raw Address values containing tagged Object
// pointers because it is called via ExternalReference.
@@ -371,7 +415,7 @@ class SmallOrderedHashTable : public HeapObject {
// we've already reached MaxCapacity.
static MaybeHandle<Derived> Grow(Isolate* isolate, Handle<Derived> table);
- int FindEntry(Isolate* isolate, Object key);
+ InternalIndex FindEntry(Isolate* isolate, Object key);
static Handle<Derived> Shrink(Isolate* isolate, Handle<Derived> table);
// Iterates only fields in the DataTable.
@@ -418,7 +462,11 @@ class SmallOrderedHashTable : public HeapObject {
int NumberOfBuckets() const { return getByte(NumberOfBucketsOffset(), 0); }
- V8_INLINE Object KeyAt(int entry) const;
+ V8_INLINE Object KeyAt(InternalIndex entry) const;
+
+ InternalIndex::Range IterateEntries() {
+ return InternalIndex::Range(UsedCapacity());
+ }
DECL_VERIFIER(SmallOrderedHashTable)
@@ -460,8 +508,7 @@ class SmallOrderedHashTable : public HeapObject {
}
Address GetHashTableStartAddress(int capacity) const {
- return FIELD_ADDR(*this,
- DataTableStartOffset() + DataTableSizeFor(capacity));
+ return field_address(DataTableStartOffset() + DataTableSizeFor(capacity));
}
void SetFirstEntry(int bucket, byte value) {
@@ -699,39 +746,54 @@ class V8_EXPORT_PRIVATE OrderedHashSetHandler
Isolate* isolate, Handle<SmallOrderedHashSet> table);
};
-class OrderedNameDictionary
+class V8_EXPORT_PRIVATE OrderedNameDictionary
: public OrderedHashTable<OrderedNameDictionary, 3> {
+ using Base = OrderedHashTable<OrderedNameDictionary, 3>;
+
public:
DECL_CAST(OrderedNameDictionary)
- V8_EXPORT_PRIVATE static MaybeHandle<OrderedNameDictionary> Add(
+ static MaybeHandle<OrderedNameDictionary> Add(
Isolate* isolate, Handle<OrderedNameDictionary> table, Handle<Name> key,
Handle<Object> value, PropertyDetails details);
- V8_EXPORT_PRIVATE void SetEntry(int entry, Object key, Object value,
- PropertyDetails details);
+ void SetEntry(InternalIndex entry, Object key, Object value,
+ PropertyDetails details);
+
+ InternalIndex FindEntry(Isolate* isolate, Object key);
+
+ int NumberOfEnumerableProperties();
+
+ Object SlowReverseLookup(Isolate* isolate, Object value);
- V8_EXPORT_PRIVATE static Handle<OrderedNameDictionary> DeleteEntry(
- Isolate* isolate, Handle<OrderedNameDictionary> table, int entry);
+ static Handle<OrderedNameDictionary> DeleteEntry(
+ Isolate* isolate, Handle<OrderedNameDictionary> table,
+ InternalIndex entry);
static MaybeHandle<OrderedNameDictionary> Allocate(
Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
+ static MaybeHandle<OrderedNameDictionary> AllocateEmpty(
+ Isolate* isolate, AllocationType allocation = AllocationType::kReadOnly);
+
static MaybeHandle<OrderedNameDictionary> Rehash(
Isolate* isolate, Handle<OrderedNameDictionary> table, int new_capacity);
// Returns the value for entry.
- inline Object ValueAt(int entry);
+ inline Object ValueAt(InternalIndex entry);
+
+ // Like KeyAt, but casts to Name
+ inline Name NameAt(InternalIndex entry);
// Set the value for entry.
- inline void ValueAtPut(int entry, Object value);
+ inline void ValueAtPut(InternalIndex entry, Object value);
// Returns the property details for the property at entry.
- inline PropertyDetails DetailsAt(int entry);
+ inline PropertyDetails DetailsAt(InternalIndex entry);
// Set the details for entry.
- inline void DetailsAtPut(int entry, PropertyDetails value);
+ inline void DetailsAtPut(InternalIndex entry, PropertyDetails value);
inline void SetHash(int hash);
inline int Hash();
@@ -744,6 +806,8 @@ class OrderedNameDictionary
static const int kPropertyDetailsOffset = 2;
static const int kPrefixSize = 1;
+ static const bool kIsOrderedDictionaryType = true;
+
OBJECT_CONSTRUCTORS(OrderedNameDictionary,
OrderedHashTable<OrderedNameDictionary, 3>);
};
@@ -761,24 +825,26 @@ class V8_EXPORT_PRIVATE OrderedNameDictionaryHandler
static Handle<HeapObject> Shrink(Isolate* isolate, Handle<HeapObject> table);
static Handle<HeapObject> DeleteEntry(Isolate* isolate,
- Handle<HeapObject> table, int entry);
- static int FindEntry(Isolate* isolate, HeapObject table, Name key);
- static void SetEntry(HeapObject table, int entry, Object key, Object value,
- PropertyDetails details);
+ Handle<HeapObject> table,
+ InternalIndex entry);
+ static InternalIndex FindEntry(Isolate* isolate, HeapObject table, Name key);
+ static void SetEntry(HeapObject table, InternalIndex entry, Object key,
+ Object value, PropertyDetails details);
// Returns the value for entry.
- static Object ValueAt(HeapObject table, int entry);
+ static Object ValueAt(HeapObject table, InternalIndex entry);
// Set the value for entry.
- static void ValueAtPut(HeapObject table, int entry, Object value);
+ static void ValueAtPut(HeapObject table, InternalIndex entry, Object value);
// Returns the property details for the property at entry.
- static PropertyDetails DetailsAt(HeapObject table, int entry);
+ static PropertyDetails DetailsAt(HeapObject table, InternalIndex entry);
// Set the details for entry.
- static void DetailsAtPut(HeapObject table, int entry, PropertyDetails value);
+ static void DetailsAtPut(HeapObject table, InternalIndex entry,
+ PropertyDetails value);
- static Name KeyAt(HeapObject table, int entry);
+ static Name KeyAt(HeapObject table, InternalIndex entry);
static void SetHash(HeapObject table, int hash);
static int Hash(HeapObject table);
@@ -786,8 +852,6 @@ class V8_EXPORT_PRIVATE OrderedNameDictionaryHandler
static int NumberOfElements(HeapObject table);
static int Capacity(HeapObject table);
- static const int kNotFound = -1;
-
protected:
static MaybeHandle<OrderedNameDictionary> AdjustRepresentation(
Isolate* isolate, Handle<SmallOrderedNameDictionary> table);
@@ -802,23 +866,24 @@ class SmallOrderedNameDictionary
DECL_VERIFIER(SmallOrderedNameDictionary)
// Returns the value for entry.
- inline Object ValueAt(int entry);
+ inline Object ValueAt(InternalIndex entry);
static Handle<SmallOrderedNameDictionary> Rehash(
Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
int new_capacity);
V8_EXPORT_PRIVATE static Handle<SmallOrderedNameDictionary> DeleteEntry(
- Isolate* isolate, Handle<SmallOrderedNameDictionary> table, int entry);
+ Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
+ InternalIndex entry);
// Set the value for entry.
- inline void ValueAtPut(int entry, Object value);
+ inline void ValueAtPut(InternalIndex entry, Object value);
// Returns the property details for the property at entry.
- inline PropertyDetails DetailsAt(int entry);
+ inline PropertyDetails DetailsAt(InternalIndex entry);
// Set the details for entry.
- inline void DetailsAtPut(int entry, PropertyDetails value);
+ inline void DetailsAtPut(InternalIndex entry, PropertyDetails value);
inline void SetHash(int hash);
inline int Hash();
@@ -836,7 +901,7 @@ class SmallOrderedNameDictionary
Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
Handle<Name> key, Handle<Object> value, PropertyDetails details);
- V8_EXPORT_PRIVATE void SetEntry(int entry, Object key, Object value,
+ V8_EXPORT_PRIVATE void SetEntry(InternalIndex entry, Object key, Object value,
PropertyDetails details);
static inline Handle<Map> GetMap(ReadOnlyRoots roots);
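
Much of the churn in this header is mechanical: entry arguments move from raw int to InternalIndex, and IterateEntries() above hands out an InternalIndex::Range over the used capacity so callers can range-for over entries. A minimal sketch of such a typed index, assuming only the as_int() and Range(size) surface visible in this diff (the EntryIndex class here is hypothetical, not V8's InternalIndex):

#include <cstddef>
#include <cstdio>

// A thin wrapper that keeps entry indices from mixing with other integers and
// doubles as its own iterator for range-based for loops.
class EntryIndex {
 public:
  explicit constexpr EntryIndex(std::size_t raw) : raw_(raw) {}
  constexpr int as_int() const { return static_cast<int>(raw_); }
  constexpr EntryIndex operator*() const { return *this; }  // for range-for
  constexpr bool operator!=(EntryIndex other) const { return raw_ != other.raw_; }
  EntryIndex& operator++() {
    ++raw_;
    return *this;
  }

  // Iterable [0, size) range, mirroring how IterateEntries() is consumed.
  class Range {
   public:
    explicit constexpr Range(std::size_t size) : size_(size) {}
    constexpr EntryIndex begin() const { return EntryIndex(0); }
    constexpr EntryIndex end() const { return EntryIndex(size_); }

   private:
    std::size_t size_;
  };

 private:
  std::size_t raw_;
};

int main() {
  for (EntryIndex i : EntryIndex::Range(3)) {
    std::printf("entry %d\n", i.as_int());  // prints entries 0, 1, 2
  }
}

The benefit is purely at compile time: an EntryIndex cannot be passed where a bucket number or raw array index is expected without an explicit conversion.
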
diff --git a/deps/v8/src/objects/ordered-hash-table.tq b/deps/v8/src/objects/ordered-hash-table.tq
index d1b58d93eb..311f08aee7 100644
--- a/deps/v8/src/objects/ordered-hash-table.tq
+++ b/deps/v8/src/objects/ordered-hash-table.tq
@@ -23,7 +23,6 @@ extern class SmallOrderedHashTable extends HeapObject
extern macro SmallOrderedHashSetMapConstant(): Map;
const kSmallOrderedHashSetMap: Map = SmallOrderedHashSetMapConstant();
-@noVerifier
extern class SmallOrderedHashSet extends SmallOrderedHashTable {
number_of_elements: uint8;
number_of_deleted_elements: uint8;
@@ -62,7 +61,6 @@ struct HashMapEntry {
extern macro SmallOrderedHashMapMapConstant(): Map;
const kSmallOrderedHashMapMap: Map = SmallOrderedHashMapMapConstant();
-@noVerifier
extern class SmallOrderedHashMap extends SmallOrderedHashTable {
number_of_elements: uint8;
number_of_deleted_elements: uint8;
@@ -99,7 +97,6 @@ struct NameDictionaryEntry {
property_details: Smi|TheHole;
}
-@noVerifier
extern class SmallOrderedNameDictionary extends SmallOrderedHashTable {
hash: int32;
number_of_elements: uint8;
diff --git a/deps/v8/src/objects/primitive-heap-object-inl.h b/deps/v8/src/objects/primitive-heap-object-inl.h
index 609bf027da..a194126ceb 100644
--- a/deps/v8/src/objects/primitive-heap-object-inl.h
+++ b/deps/v8/src/objects/primitive-heap-object-inl.h
@@ -8,7 +8,6 @@
#include "src/objects/primitive-heap-object.h"
#include "src/objects/heap-object-inl.h"
-#include "torque-generated/class-definitions-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/primitive-heap-object-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(PrimitiveHeapObject)
} // namespace internal
diff --git a/deps/v8/src/objects/primitive-heap-object.h b/deps/v8/src/objects/primitive-heap-object.h
index f7a57ffc08..14023c5198 100644
--- a/deps/v8/src/objects/primitive-heap-object.h
+++ b/deps/v8/src/objects/primitive-heap-object.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_PRIMITIVE_HEAP_OBJECT_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -14,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/primitive-heap-object-tq.inc"
+
// An abstract superclass for classes representing JavaScript primitive values
// other than Smi. It doesn't carry any functionality but allows primitive
// classes to be identified in the type system.
diff --git a/deps/v8/src/objects/promise-inl.h b/deps/v8/src/objects/promise-inl.h
index da11731e25..8d3be5b68c 100644
--- a/deps/v8/src/objects/promise-inl.h
+++ b/deps/v8/src/objects/promise-inl.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/promise-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(PromiseReactionJobTask)
TQ_OBJECT_CONSTRUCTORS_IMPL(PromiseFulfillReactionJobTask)
TQ_OBJECT_CONSTRUCTORS_IMPL(PromiseRejectReactionJobTask)
diff --git a/deps/v8/src/objects/promise.h b/deps/v8/src/objects/promise.h
index 2582543f77..497498c166 100644
--- a/deps/v8/src/objects/promise.h
+++ b/deps/v8/src/objects/promise.h
@@ -15,6 +15,8 @@ namespace internal {
class JSPromise;
+#include "torque-generated/src/objects/promise-tq.inc"
+
// Struct to hold state required for PromiseReactionJob. See the comment on the
// PromiseReaction below for details on how this is being managed to reduce the
// memory and allocation overhead. This is the base class for the concrete
diff --git a/deps/v8/src/objects/property-array-inl.h b/deps/v8/src/objects/property-array-inl.h
index c942177554..d4a4bc94fa 100644
--- a/deps/v8/src/objects/property-array-inl.h
+++ b/deps/v8/src/objects/property-array-inl.h
@@ -25,11 +25,11 @@ SMI_ACCESSORS(PropertyArray, length_and_hash, kLengthAndHashOffset)
SYNCHRONIZED_SMI_ACCESSORS(PropertyArray, length_and_hash, kLengthAndHashOffset)
Object PropertyArray::get(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return get(isolate, index);
}
-Object PropertyArray::get(const Isolate* isolate, int index) const {
+Object PropertyArray::get(IsolateRoot isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index),
static_cast<unsigned>(this->length()));
return TaggedField<Object>::Relaxed_Load(isolate, *this,
diff --git a/deps/v8/src/objects/property-array.h b/deps/v8/src/objects/property-array.h
index e7fbb49c72..da15e8d732 100644
--- a/deps/v8/src/objects/property-array.h
+++ b/deps/v8/src/objects/property-array.h
@@ -30,7 +30,7 @@ class PropertyArray : public HeapObject {
inline int Hash() const;
inline Object get(int index) const;
- inline Object get(const Isolate* isolate, int index) const;
+ inline Object get(IsolateRoot isolate, int index) const;
inline void set(int index, Object value);
// Setter with explicit barrier mode.
diff --git a/deps/v8/src/objects/property-descriptor-object-inl.h b/deps/v8/src/objects/property-descriptor-object-inl.h
index 7033107613..3f16b16e56 100644
--- a/deps/v8/src/objects/property-descriptor-object-inl.h
+++ b/deps/v8/src/objects/property-descriptor-object-inl.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/property-descriptor-object-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(PropertyDescriptorObject)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/property-descriptor-object.h b/deps/v8/src/objects/property-descriptor-object.h
index 1b019e48f2..c9affb4ff7 100644
--- a/deps/v8/src/objects/property-descriptor-object.h
+++ b/deps/v8/src/objects/property-descriptor-object.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/property-descriptor-object-tq.inc"
+
class PropertyDescriptorObject
: public TorqueGeneratedPropertyDescriptorObject<PropertyDescriptorObject,
Struct> {
diff --git a/deps/v8/src/objects/property-descriptor.cc b/deps/v8/src/objects/property-descriptor.cc
index a14601bc74..e7bfd039de 100644
--- a/deps/v8/src/objects/property-descriptor.cc
+++ b/deps/v8/src/objects/property-descriptor.cc
@@ -57,7 +57,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
// TODO(jkummerow): support dictionary properties?
if (map->is_dictionary_map()) return false;
Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray>(map->instance_descriptors(kRelaxedLoad), isolate);
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
Handle<Object> value;
diff --git a/deps/v8/src/objects/property.cc b/deps/v8/src/objects/property.cc
index 9b94739bf2..a4336e295b 100644
--- a/deps/v8/src/objects/property.cc
+++ b/deps/v8/src/objects/property.cc
@@ -75,7 +75,7 @@ Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
Descriptor Descriptor::DataConstant(Handle<Name> key, Handle<Object> value,
PropertyAttributes attributes) {
- const Isolate* isolate = GetIsolateForPtrCompr(*key);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*key);
return Descriptor(key, MaybeObjectHandle(value), kData, attributes,
kDescriptor, PropertyConstness::kConst,
value->OptimalRepresentation(isolate), 0);
diff --git a/deps/v8/src/objects/prototype-info-inl.h b/deps/v8/src/objects/prototype-info-inl.h
index 8c93b21f24..9e18949db3 100644
--- a/deps/v8/src/objects/prototype-info-inl.h
+++ b/deps/v8/src/objects/prototype-info-inl.h
@@ -20,6 +20,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/prototype-info-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(PrototypeInfo)
Map PrototypeInfo::ObjectCreateMap() {
diff --git a/deps/v8/src/objects/prototype-info.h b/deps/v8/src/objects/prototype-info.h
index ab312b71a7..e4baa78fde 100644
--- a/deps/v8/src/objects/prototype-info.h
+++ b/deps/v8/src/objects/prototype-info.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/prototype-info-tq.inc"
+
// Container for metadata stored on each prototype map.
class PrototypeInfo
: public TorqueGeneratedPrototypeInfo<PrototypeInfo, Struct> {
diff --git a/deps/v8/src/objects/prototype.h b/deps/v8/src/objects/prototype.h
index cd003837ca..0a8f21819a 100644
--- a/deps/v8/src/objects/prototype.h
+++ b/deps/v8/src/objects/prototype.h
@@ -42,6 +42,8 @@ class PrototypeIterator {
WhereToEnd where_to_end = END_AT_NULL);
~PrototypeIterator() = default;
+ PrototypeIterator(const PrototypeIterator&) = delete;
+ PrototypeIterator& operator=(const PrototypeIterator&) = delete;
inline bool HasAccess() const;
@@ -78,8 +80,6 @@ class PrototypeIterator {
WhereToEnd where_to_end_;
bool is_at_end_;
int seen_proxies_;
-
- DISALLOW_COPY_AND_ASSIGN(PrototypeIterator);
};
} // namespace internal
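
PrototypeIterator here, and Script::Iterator, SharedFunctionInfo::ScriptIterator and StringComparator later in this diff, all drop DISALLOW_COPY_AND_ASSIGN in favour of copy operations that are explicitly deleted next to the other public members. The pattern in isolation (Cursor is a made-up name):

class Cursor {
 public:
  Cursor() = default;
  Cursor(const Cursor&) = delete;             // copying is a compile error
  Cursor& operator=(const Cursor&) = delete;  // so is copy assignment
};

int main() {
  Cursor a;
  // Cursor b = a;  // would not compile: use of deleted copy constructor
  (void)a;
}

Declaring the copy operations, even as deleted, also suppresses the implicitly generated move operations, which matches what the old macro did.
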
diff --git a/deps/v8/src/objects/regexp-match-info.h b/deps/v8/src/objects/regexp-match-info.h
index e862f687c6..9a1b03828e 100644
--- a/deps/v8/src/objects/regexp-match-info.h
+++ b/deps/v8/src/objects/regexp-match-info.h
@@ -8,6 +8,7 @@
#include "src/base/compiler-specific.h"
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
+#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -18,6 +19,8 @@ namespace internal {
class Object;
class String;
+#include "torque-generated/src/objects/regexp-match-info-tq.inc"
+
// The property RegExpMatchInfo includes the matchIndices
// array of the last successful regexp match (an array of start/end index
// pairs for the match and all the captured substrings), the invariant is
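
The surrounding comment describes the last-match info as a flat array of start/end index pairs, first for the whole match and then for each captured substring. A small standalone reader for that shape (plain C++, not the RegExpMatchInfo API):

#include <cstddef>
#include <cstdio>
#include <vector>

// Walks a flat [start0, end0, start1, end1, ...] array of start/end pairs,
// the layout the comment above describes.
void PrintCaptures(const std::vector<int>& match_indices) {
  for (std::size_t i = 0; i + 1 < match_indices.size(); i += 2) {
    std::printf("capture %zu: start=%d end=%d\n", i / 2, match_indices[i],
                match_indices[i + 1]);
  }
}

int main() {
  // Whole match spanning indices 2..5, one capture spanning 3..4.
  PrintCaptures({2, 5, 3, 4});
}
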
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index 01a4964bdb..6f9f944b68 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -1044,6 +1044,24 @@ std::ostream& operator<<(std::ostream& os, VariableAllocationInfo var_info) {
}
template <typename LocalIsolate>
+Handle<ModuleRequest> ModuleRequest::New(LocalIsolate* isolate,
+ Handle<String> specifier,
+ Handle<FixedArray> import_assertions) {
+ Handle<ModuleRequest> result = Handle<ModuleRequest>::cast(
+ isolate->factory()->NewStruct(MODULE_REQUEST_TYPE, AllocationType::kOld));
+ result->set_specifier(*specifier);
+ result->set_import_assertions(*import_assertions);
+ return result;
+}
+
+template Handle<ModuleRequest> ModuleRequest::New(
+ Isolate* isolate, Handle<String> specifier,
+ Handle<FixedArray> import_assertions);
+template Handle<ModuleRequest> ModuleRequest::New(
+ LocalIsolate* isolate, Handle<String> specifier,
+ Handle<FixedArray> import_assertions);
+
+template <typename LocalIsolate>
Handle<SourceTextModuleInfoEntry> SourceTextModuleInfoEntry::New(
LocalIsolate* isolate, Handle<PrimitiveHeapObject> export_name,
Handle<PrimitiveHeapObject> local_name,
@@ -1082,7 +1100,9 @@ Handle<SourceTextModuleInfo> SourceTextModuleInfo::New(
Handle<FixedArray> module_request_positions =
isolate->factory()->NewFixedArray(size);
for (const auto& elem : descr->module_requests()) {
- module_requests->set(elem.second.index, *elem.first->string());
+ Handle<ModuleRequest> serialized_module_request =
+ elem.first->Serialize(isolate);
+ module_requests->set(elem.second.index, *serialized_module_request);
module_request_positions->set(elem.second.index,
Smi::FromInt(elem.second.position));
}
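
SourceTextModuleInfo::New now serializes each parser-side module request into a ModuleRequest (specifier plus import assertions) before storing it at its assigned index, rather than storing the bare specifier string. The indexing step in isolation, with plain C++ stand-ins for the handles and fixed arrays (all names hypothetical):

#include <string>
#include <utility>
#include <vector>

struct SerializedRequest {
  std::string specifier;                       // e.g. "./dep.mjs"
  std::vector<std::string> import_assertions;  // flattened key/value data
};

struct RequestSlot {
  int index;     // position in the requests array chosen during parsing
  int position;  // source position of the import site
};

// Mirrors the loop above: every request lands at its pre-assigned index, and a
// parallel array records the source position.
void FillModuleRequestArrays(
    const std::vector<std::pair<SerializedRequest, RequestSlot>>& parsed,
    std::vector<SerializedRequest>* module_requests,
    std::vector<int>* module_request_positions) {
  module_requests->resize(parsed.size());
  module_request_positions->resize(parsed.size());
  for (const auto& elem : parsed) {
    (*module_requests)[elem.second.index] = elem.first;
    (*module_request_positions)[elem.second.index] = elem.second.position;
  }
}

int main() {
  std::vector<std::pair<SerializedRequest, RequestSlot>> parsed = {
      {{"./a.mjs", {}}, {0, 10}},
      {{"./b.json", {"type", "json"}}, {1, 42}},
  };
  std::vector<SerializedRequest> requests;
  std::vector<int> positions;
  FillModuleRequestArrays(parsed, &requests, &positions);
}
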
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index aaa1910ceb..00c8bb0e2e 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/script-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(Script)
NEVER_READ_ONLY_SPACE_IMPL(Script)
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index d9caff4bb6..6e3e633f53 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -20,6 +20,8 @@ namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/script-tq.inc"
+
// Script describes a script which has been added to the VM.
class Script : public TorqueGeneratedScript<Script, Struct> {
public:
@@ -172,11 +174,12 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
class V8_EXPORT_PRIVATE Iterator {
public:
explicit Iterator(Isolate* isolate);
+ Iterator(const Iterator&) = delete;
+ Iterator& operator=(const Iterator&) = delete;
Script Next();
private:
WeakArrayList::Iterator iterator_;
- DISALLOW_COPY_AND_ASSIGN(Iterator);
};
// Dispatched behavior.
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 17677106d9..caf14e8bc3 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -22,6 +22,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/shared-function-info-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(PreparseData)
int PreparseData::inner_start_offset() const {
@@ -57,7 +59,7 @@ void PreparseData::set(int index, byte value) {
void PreparseData::copy_in(int index, const byte* buffer, int length) {
DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
index + length <= this->data_length());
- Address dst_addr = FIELD_ADDR(*this, kDataStartOffset + index * kByteSize);
+ Address dst_addr = field_address(kDataStartOffset + index * kByteSize);
memcpy(reinterpret_cast<void*>(dst_addr), buffer, length);
}
@@ -97,12 +99,12 @@ NEVER_READ_ONLY_SPACE_IMPL(SharedFunctionInfo)
CAST_ACCESSOR(SharedFunctionInfo)
DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
-SYNCHRONIZED_ACCESSORS(SharedFunctionInfo, function_data, Object,
- kFunctionDataOffset)
-ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
- kNameOrScopeInfoOffset)
-ACCESSORS(SharedFunctionInfo, script_or_debug_info, HeapObject,
- kScriptOrDebugInfoOffset)
+RELEASE_ACQUIRE_ACCESSORS(SharedFunctionInfo, function_data, Object,
+ kFunctionDataOffset)
+RELEASE_ACQUIRE_ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
+ kNameOrScopeInfoOffset)
+RELEASE_ACQUIRE_ACCESSORS(SharedFunctionInfo, script_or_debug_info, HeapObject,
+ kScriptOrDebugInfoOffset)
INT32_ACCESSORS(SharedFunctionInfo, function_literal_id,
kFunctionLiteralIdOffset)
@@ -121,7 +123,7 @@ RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
UINT8_ACCESSORS(SharedFunctionInfo, flags2, kFlags2Offset)
bool SharedFunctionInfo::HasSharedName() const {
- Object value = name_or_scope_info();
+ Object value = name_or_scope_info(kAcquireLoad);
if (value.IsScopeInfo()) {
return ScopeInfo::cast(value).HasSharedFunctionName();
}
@@ -130,7 +132,7 @@ bool SharedFunctionInfo::HasSharedName() const {
String SharedFunctionInfo::Name() const {
if (!HasSharedName()) return GetReadOnlyRoots().empty_string();
- Object value = name_or_scope_info();
+ Object value = name_or_scope_info(kAcquireLoad);
if (value.IsScopeInfo()) {
if (ScopeInfo::cast(value).HasFunctionName()) {
return String::cast(ScopeInfo::cast(value).FunctionName());
@@ -141,13 +143,13 @@ String SharedFunctionInfo::Name() const {
}
void SharedFunctionInfo::SetName(String name) {
- Object maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info(kAcquireLoad);
if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo::cast(maybe_scope_info).SetFunctionName(name);
} else {
DCHECK(maybe_scope_info.IsString() ||
maybe_scope_info == kNoSharedNameSentinel);
- set_name_or_scope_info(name);
+ set_name_or_scope_info(name, kReleaseStore);
}
UpdateFunctionMapIndex();
}
@@ -219,9 +221,6 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
is_oneshot_iife_or_properties_are_final,
SharedFunctionInfo::IsOneshotIifeOrPropertiesAreFinalBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
- is_safe_to_skip_arguments_adaptor,
- SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
private_name_lookup_skips_outer_class,
SharedFunctionInfo::PrivateNameLookupSkipsOuterClassBit)
@@ -334,17 +333,17 @@ void SharedFunctionInfo::DontAdaptArguments() {
bool SharedFunctionInfo::IsInterpreted() const { return HasBytecodeArray(); }
ScopeInfo SharedFunctionInfo::scope_info() const {
- Object maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info(kAcquireLoad);
if (maybe_scope_info.IsScopeInfo()) {
return ScopeInfo::cast(maybe_scope_info);
}
return GetReadOnlyRoots().empty_scope_info();
}
-void SharedFunctionInfo::set_scope_info(ScopeInfo scope_info,
- WriteBarrierMode mode) {
+void SharedFunctionInfo::SetScopeInfo(ScopeInfo scope_info,
+ WriteBarrierMode mode) {
// Move the existing name onto the ScopeInfo.
- Object name = name_or_scope_info();
+ Object name = name_or_scope_info(kAcquireLoad);
if (name.IsScopeInfo()) {
name = ScopeInfo::cast(name).FunctionName();
}
@@ -354,7 +353,7 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo scope_info,
if (HasInferredName() && inferred_name().length() != 0) {
scope_info.SetInferredFunctionName(inferred_name());
}
- set_raw_scope_info(scope_info, mode);
+ set_name_or_scope_info(scope_info, kReleaseStore, mode);
}
void SharedFunctionInfo::set_raw_scope_info(ScopeInfo scope_info,
@@ -415,7 +414,7 @@ void SharedFunctionInfo::set_feedback_metadata(FeedbackMetadata value,
}
bool SharedFunctionInfo::is_compiled() const {
- Object data = function_data();
+ Object data = function_data(kAcquireLoad);
return data != Smi::FromEnum(Builtins::kCompileLazy) &&
!data.IsUncompiledData();
}
@@ -450,56 +449,60 @@ bool SharedFunctionInfo::has_simple_parameters() {
}
bool SharedFunctionInfo::IsApiFunction() const {
- return function_data().IsFunctionTemplateInfo();
+ return function_data(kAcquireLoad).IsFunctionTemplateInfo();
}
FunctionTemplateInfo SharedFunctionInfo::get_api_func_data() const {
DCHECK(IsApiFunction());
- return FunctionTemplateInfo::cast(function_data());
+ return FunctionTemplateInfo::cast(function_data(kAcquireLoad));
}
bool SharedFunctionInfo::HasBytecodeArray() const {
- return function_data().IsBytecodeArray() ||
- function_data().IsInterpreterData();
+ Object data = function_data(kAcquireLoad);
+ return data.IsBytecodeArray() || data.IsInterpreterData();
}
BytecodeArray SharedFunctionInfo::GetBytecodeArray() const {
DCHECK(HasBytecodeArray());
if (HasDebugInfo() && GetDebugInfo().HasInstrumentedBytecodeArray()) {
return GetDebugInfo().OriginalBytecodeArray();
- } else if (function_data().IsBytecodeArray()) {
- return BytecodeArray::cast(function_data());
+ }
+
+ Object data = function_data(kAcquireLoad);
+ if (data.IsBytecodeArray()) {
+ return BytecodeArray::cast(data);
} else {
- DCHECK(function_data().IsInterpreterData());
- return InterpreterData::cast(function_data()).bytecode_array();
+ DCHECK(data.IsInterpreterData());
+ return InterpreterData::cast(data).bytecode_array();
}
}
BytecodeArray SharedFunctionInfo::GetDebugBytecodeArray() const {
- DCHECK(HasBytecodeArray());
DCHECK(HasDebugInfo() && GetDebugInfo().HasInstrumentedBytecodeArray());
- if (function_data().IsBytecodeArray()) {
- return BytecodeArray::cast(function_data());
+
+ Object data = function_data(kAcquireLoad);
+ if (data.IsBytecodeArray()) {
+ return BytecodeArray::cast(data);
} else {
- DCHECK(function_data().IsInterpreterData());
- return InterpreterData::cast(function_data()).bytecode_array();
+ DCHECK(data.IsInterpreterData());
+ return InterpreterData::cast(data).bytecode_array();
}
}
void SharedFunctionInfo::SetDebugBytecodeArray(BytecodeArray bytecode) {
- DCHECK(HasBytecodeArray());
- if (function_data().IsBytecodeArray()) {
- set_function_data(bytecode);
+ Object data = function_data(kAcquireLoad);
+ if (data.IsBytecodeArray()) {
+ set_function_data(bytecode, kReleaseStore);
} else {
- DCHECK(function_data().IsInterpreterData());
+ DCHECK(data.IsInterpreterData());
interpreter_data().set_bytecode_array(bytecode);
}
}
void SharedFunctionInfo::set_bytecode_array(BytecodeArray bytecode) {
- DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy) ||
+ DCHECK(function_data(kAcquireLoad) == Smi::FromEnum(Builtins::kCompileLazy) ||
HasUncompiledData());
- set_function_data(bytecode);
+ set_function_data(bytecode, kReleaseStore);
}
bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
@@ -513,7 +516,7 @@ bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
// Get a snapshot of the function data field, and if it is a bytecode array,
// check if it is old. Note, this is done this way since this function can be
// called by the concurrent marker.
- Object data = function_data();
+ Object data = function_data(kAcquireLoad);
if (!data.IsBytecodeArray()) return false;
if (mode == BytecodeFlushMode::kStressFlushBytecode) return true;
@@ -529,86 +532,87 @@ Code SharedFunctionInfo::InterpreterTrampoline() const {
}
bool SharedFunctionInfo::HasInterpreterData() const {
- return function_data().IsInterpreterData();
+ return function_data(kAcquireLoad).IsInterpreterData();
}
InterpreterData SharedFunctionInfo::interpreter_data() const {
DCHECK(HasInterpreterData());
- return InterpreterData::cast(function_data());
+ return InterpreterData::cast(function_data(kAcquireLoad));
}
void SharedFunctionInfo::set_interpreter_data(
InterpreterData interpreter_data) {
DCHECK(FLAG_interpreted_frames_native_stack);
- set_function_data(interpreter_data);
+ set_function_data(interpreter_data, kReleaseStore);
}
bool SharedFunctionInfo::HasAsmWasmData() const {
- return function_data().IsAsmWasmData();
+ return function_data(kAcquireLoad).IsAsmWasmData();
}
AsmWasmData SharedFunctionInfo::asm_wasm_data() const {
DCHECK(HasAsmWasmData());
- return AsmWasmData::cast(function_data());
+ return AsmWasmData::cast(function_data(kAcquireLoad));
}
void SharedFunctionInfo::set_asm_wasm_data(AsmWasmData data) {
- DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy) ||
+ DCHECK(function_data(kAcquireLoad) == Smi::FromEnum(Builtins::kCompileLazy) ||
HasUncompiledData() || HasAsmWasmData());
- set_function_data(data);
+ set_function_data(data, kReleaseStore);
}
bool SharedFunctionInfo::HasBuiltinId() const {
- return function_data().IsSmi();
+ return function_data(kAcquireLoad).IsSmi();
}
int SharedFunctionInfo::builtin_id() const {
DCHECK(HasBuiltinId());
- int id = Smi::ToInt(function_data());
+ int id = Smi::ToInt(function_data(kAcquireLoad));
DCHECK(Builtins::IsBuiltinId(id));
return id;
}
void SharedFunctionInfo::set_builtin_id(int builtin_id) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
- set_function_data(Smi::FromInt(builtin_id), SKIP_WRITE_BARRIER);
+ set_function_data(Smi::FromInt(builtin_id), kReleaseStore,
+ SKIP_WRITE_BARRIER);
}
bool SharedFunctionInfo::HasUncompiledData() const {
- return function_data().IsUncompiledData();
+ return function_data(kAcquireLoad).IsUncompiledData();
}
UncompiledData SharedFunctionInfo::uncompiled_data() const {
DCHECK(HasUncompiledData());
- return UncompiledData::cast(function_data());
+ return UncompiledData::cast(function_data(kAcquireLoad));
}
void SharedFunctionInfo::set_uncompiled_data(UncompiledData uncompiled_data) {
- DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy) ||
+ DCHECK(function_data(kAcquireLoad) == Smi::FromEnum(Builtins::kCompileLazy) ||
HasUncompiledData());
DCHECK(uncompiled_data.IsUncompiledData());
- set_function_data(uncompiled_data);
+ set_function_data(uncompiled_data, kReleaseStore);
}
bool SharedFunctionInfo::HasUncompiledDataWithPreparseData() const {
- return function_data().IsUncompiledDataWithPreparseData();
+ return function_data(kAcquireLoad).IsUncompiledDataWithPreparseData();
}
UncompiledDataWithPreparseData
SharedFunctionInfo::uncompiled_data_with_preparse_data() const {
DCHECK(HasUncompiledDataWithPreparseData());
- return UncompiledDataWithPreparseData::cast(function_data());
+ return UncompiledDataWithPreparseData::cast(function_data(kAcquireLoad));
}
void SharedFunctionInfo::set_uncompiled_data_with_preparse_data(
UncompiledDataWithPreparseData uncompiled_data_with_preparse_data) {
- DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy));
+ DCHECK(function_data(kAcquireLoad) == Smi::FromEnum(Builtins::kCompileLazy));
DCHECK(uncompiled_data_with_preparse_data.IsUncompiledDataWithPreparseData());
- set_function_data(uncompiled_data_with_preparse_data);
+ set_function_data(uncompiled_data_with_preparse_data, kReleaseStore);
}
bool SharedFunctionInfo::HasUncompiledDataWithoutPreparseData() const {
- return function_data().IsUncompiledDataWithoutPreparseData();
+ return function_data(kAcquireLoad).IsUncompiledDataWithoutPreparseData();
}
void SharedFunctionInfo::ClearPreparseData() {
@@ -670,19 +674,19 @@ void UncompiledDataWithPreparseData::Init(LocalIsolate* isolate,
}
bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
- return function_data().IsWasmExportedFunctionData();
+ return function_data(kAcquireLoad).IsWasmExportedFunctionData();
}
bool SharedFunctionInfo::HasWasmJSFunctionData() const {
- return function_data().IsWasmJSFunctionData();
+ return function_data(kAcquireLoad).IsWasmJSFunctionData();
}
bool SharedFunctionInfo::HasWasmCapiFunctionData() const {
- return function_data().IsWasmCapiFunctionData();
+ return function_data(kAcquireLoad).IsWasmCapiFunctionData();
}
HeapObject SharedFunctionInfo::script() const {
- HeapObject maybe_script = script_or_debug_info();
+ HeapObject maybe_script = script_or_debug_info(kAcquireLoad);
if (maybe_script.IsDebugInfo()) {
return DebugInfo::cast(maybe_script).script();
}
@@ -690,11 +694,11 @@ HeapObject SharedFunctionInfo::script() const {
}
void SharedFunctionInfo::set_script(HeapObject script) {
- HeapObject maybe_debug_info = script_or_debug_info();
+ HeapObject maybe_debug_info = script_or_debug_info(kAcquireLoad);
if (maybe_debug_info.IsDebugInfo()) {
DebugInfo::cast(maybe_debug_info).set_script(script);
} else {
- set_script_or_debug_info(script);
+ set_script_or_debug_info(script, kReleaseStore);
}
}
@@ -703,22 +707,23 @@ bool SharedFunctionInfo::is_repl_mode() const {
}
bool SharedFunctionInfo::HasDebugInfo() const {
- return script_or_debug_info().IsDebugInfo();
+ return script_or_debug_info(kAcquireLoad).IsDebugInfo();
}
DebugInfo SharedFunctionInfo::GetDebugInfo() const {
- DCHECK(HasDebugInfo());
- return DebugInfo::cast(script_or_debug_info());
+ auto debug_info = script_or_debug_info(kAcquireLoad);
+ DCHECK(debug_info.IsDebugInfo());
+ return DebugInfo::cast(debug_info);
}
void SharedFunctionInfo::SetDebugInfo(DebugInfo debug_info) {
DCHECK(!HasDebugInfo());
- DCHECK_EQ(debug_info.script(), script_or_debug_info());
- set_script_or_debug_info(debug_info);
+ DCHECK_EQ(debug_info.script(), script_or_debug_info(kAcquireLoad));
+ set_script_or_debug_info(debug_info, kReleaseStore);
}
bool SharedFunctionInfo::HasInferredName() {
- Object scope_info = name_or_scope_info();
+ Object scope_info = name_or_scope_info(kAcquireLoad);
if (scope_info.IsScopeInfo()) {
return ScopeInfo::cast(scope_info).HasInferredFunctionName();
}
@@ -726,7 +731,7 @@ bool SharedFunctionInfo::HasInferredName() {
}
String SharedFunctionInfo::inferred_name() {
- Object maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info(kAcquireLoad);
if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo scope_info = ScopeInfo::cast(maybe_scope_info);
if (scope_info.HasInferredFunctionName()) {
diff --git a/deps/v8/src/objects/shared-function-info.cc b/deps/v8/src/objects/shared-function-info.cc
index e920425d24..885d88e689 100644
--- a/deps/v8/src/objects/shared-function-info.cc
+++ b/deps/v8/src/objects/shared-function-info.cc
@@ -36,14 +36,15 @@ void SharedFunctionInfo::Init(ReadOnlyRoots ro_roots, int unique_id) {
// Set the name to the no-name sentinel, this can be updated later.
set_name_or_scope_info(SharedFunctionInfo::kNoSharedNameSentinel,
- SKIP_WRITE_BARRIER);
+ kReleaseStore, SKIP_WRITE_BARRIER);
// Generally functions won't have feedback, unless they have been created
// from a FunctionLiteral. Those can just reset this field to keep the
// SharedFunctionInfo in a consistent state.
set_raw_outer_scope_info_or_feedback_metadata(ro_roots.the_hole_value(),
SKIP_WRITE_BARRIER);
- set_script_or_debug_info(ro_roots.undefined_value(), SKIP_WRITE_BARRIER);
+ set_script_or_debug_info(ro_roots.undefined_value(), kReleaseStore,
+ SKIP_WRITE_BARRIER);
set_function_literal_id(kFunctionLiteralIdInvalid);
#if V8_SFI_HAS_UNIQUE_ID
set_unique_id(unique_id);
@@ -72,7 +73,7 @@ Code SharedFunctionInfo::GetCode() const {
// ======
Isolate* isolate = GetIsolate();
- Object data = function_data();
+ Object data = function_data(kAcquireLoad);
if (data.IsSmi()) {
// Holding a Smi means we are a builtin.
DCHECK(HasBuiltinId());
@@ -113,17 +114,17 @@ Code SharedFunctionInfo::GetCode() const {
WasmExportedFunctionData SharedFunctionInfo::wasm_exported_function_data()
const {
DCHECK(HasWasmExportedFunctionData());
- return WasmExportedFunctionData::cast(function_data());
+ return WasmExportedFunctionData::cast(function_data(kAcquireLoad));
}
WasmJSFunctionData SharedFunctionInfo::wasm_js_function_data() const {
DCHECK(HasWasmJSFunctionData());
- return WasmJSFunctionData::cast(function_data());
+ return WasmJSFunctionData::cast(function_data(kAcquireLoad));
}
WasmCapiFunctionData SharedFunctionInfo::wasm_capi_function_data() const {
DCHECK(HasWasmCapiFunctionData());
- return WasmCapiFunctionData::cast(function_data());
+ return WasmCapiFunctionData::cast(function_data(kAcquireLoad));
}
SharedFunctionInfo::ScriptIterator::ScriptIterator(Isolate* isolate,
@@ -310,7 +311,7 @@ void SharedFunctionInfo::DiscardCompiled(
Handle<UncompiledData> data =
isolate->factory()->NewUncompiledDataWithoutPreparseData(
inferred_name_val, start_position, end_position);
- shared_info->set_function_data(*data);
+ shared_info->set_function_data(*data, kReleaseStore);
}
}
@@ -450,7 +451,7 @@ template <typename LocalIsolate>
void SharedFunctionInfo::InitFromFunctionLiteral(
LocalIsolate* isolate, Handle<SharedFunctionInfo> shared_info,
FunctionLiteral* lit, bool is_toplevel) {
- DCHECK(!shared_info->name_or_scope_info().IsScopeInfo());
+ DCHECK(!shared_info->name_or_scope_info(kAcquireLoad).IsScopeInfo());
// When adding fields here, make sure DeclarationScope::AnalyzePartially is
// updated accordingly.
@@ -497,8 +498,6 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
if (lit->ShouldEagerCompile()) {
shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
shared_info->UpdateAndFinalizeExpectedNofPropertiesFromEstimate(lit);
- shared_info->set_is_safe_to_skip_arguments_adaptor(
- lit->SafeToSkipArgumentsAdaptor());
DCHECK_NULL(lit->produced_preparse_data());
// If we're about to eager compile, we'll have the function literal
@@ -506,7 +505,6 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
return;
}
- shared_info->set_is_safe_to_skip_arguments_adaptor(false);
shared_info->UpdateExpectedNofPropertiesFromEstimate(lit);
Handle<UncompiledData> data;
@@ -593,7 +591,7 @@ void SharedFunctionInfo::SetFunctionTokenPosition(int function_token_position,
}
int SharedFunctionInfo::StartPosition() const {
- Object maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info(kAcquireLoad);
if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
if (info.HasPositionInfo()) {
@@ -618,7 +616,7 @@ int SharedFunctionInfo::StartPosition() const {
}
int SharedFunctionInfo::EndPosition() const {
- Object maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info(kAcquireLoad);
if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
if (info.HasPositionInfo()) {
@@ -643,7 +641,7 @@ int SharedFunctionInfo::EndPosition() const {
}
void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
- Object maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info(kAcquireLoad);
if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
if (info.HasPositionInfo()) {
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index e195f99771..be6705e327 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -38,6 +38,8 @@ class WasmCapiFunctionData;
class WasmExportedFunctionData;
class WasmJSFunctionData;
+#include "torque-generated/src/objects/shared-function-info-tq.inc"
+
// Data collected by the pre-parser storing information about scopes and inner
// functions.
//
@@ -215,13 +217,15 @@ class SharedFunctionInfo : public HeapObject {
static const int kNotFound = -1;
- // [scope_info]: Scope info.
- DECL_ACCESSORS(scope_info, ScopeInfo)
+ DECL_GETTER(scope_info, ScopeInfo)
// Set scope_info without moving the existing name onto the ScopeInfo.
inline void set_raw_scope_info(ScopeInfo scope_info,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void SetScopeInfo(ScopeInfo scope_info,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
inline bool is_script() const;
inline bool needs_script_context() const;
@@ -304,7 +308,7 @@ class SharedFunctionInfo : public HeapObject {
// - a UncompiledDataWithPreparseData for lazy compilation
// [HasUncompiledDataWithPreparseData()]
// - a WasmExportedFunctionData for Wasm [HasWasmExportedFunctionData()]
- DECL_ACCESSORS(function_data, Object)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(function_data, Object)
inline bool IsApiFunction() const;
inline bool is_class_constructor() const;
@@ -337,7 +341,8 @@ class SharedFunctionInfo : public HeapObject {
UncompiledDataWithPreparseData data);
inline bool HasUncompiledDataWithoutPreparseData() const;
inline bool HasWasmExportedFunctionData() const;
- WasmExportedFunctionData wasm_exported_function_data() const;
+ V8_EXPORT_PRIVATE WasmExportedFunctionData
+ wasm_exported_function_data() const;
inline bool HasWasmJSFunctionData() const;
WasmJSFunctionData wasm_js_function_data() const;
inline bool HasWasmCapiFunctionData() const;
@@ -373,7 +378,7 @@ class SharedFunctionInfo : public HeapObject {
// [script_or_debug_info]: One of:
// - Script from which the function originates.
// - a DebugInfo which holds the actual script [HasDebugInfo()].
- DECL_ACCESSORS(script_or_debug_info, HeapObject)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(script_or_debug_info, HeapObject)
inline HeapObject script() const;
inline void set_script(HeapObject script);
@@ -463,17 +468,6 @@ class SharedFunctionInfo : public HeapObject {
// Whether or not the number of expected properties may change.
DECL_BOOLEAN_ACCESSORS(are_properties_final)
- // Indicates that the function represented by the shared function info
- // cannot observe the actual parameters passed at a call site, which
- // means the function doesn't use the arguments object, doesn't use
- // rest parameters, and is also in strict mode (meaning that there's
- // no way to get to the actual arguments via the non-standard "arguments"
- // accessor on sloppy mode functions). This can be used to speed up calls
- // to this function even in the presence of arguments mismatch.
- // See http://bit.ly/v8-faster-calls-with-arguments-mismatch for more
- // information on this.
- DECL_BOOLEAN_ACCESSORS(is_safe_to_skip_arguments_adaptor)
-
// Indicates that the function has been reported for binary code coverage.
DECL_BOOLEAN_ACCESSORS(has_reported_binary_coverage)
@@ -619,6 +613,8 @@ class SharedFunctionInfo : public HeapObject {
public:
V8_EXPORT_PRIVATE ScriptIterator(Isolate* isolate, Script script);
explicit ScriptIterator(Handle<WeakFixedArray> shared_function_infos);
+ ScriptIterator(const ScriptIterator&) = delete;
+ ScriptIterator& operator=(const ScriptIterator&) = delete;
V8_EXPORT_PRIVATE SharedFunctionInfo Next();
int CurrentIndex() const { return index_ - 1; }
@@ -628,7 +624,6 @@ class SharedFunctionInfo : public HeapObject {
private:
Handle<WeakFixedArray> shared_function_infos_;
int index_;
- DISALLOW_COPY_AND_ASSIGN(ScriptIterator);
};
DECL_CAST(SharedFunctionInfo)
@@ -665,7 +660,7 @@ class SharedFunctionInfo : public HeapObject {
// [name_or_scope_info]: Function name string, kNoSharedNameSentinel or
// ScopeInfo.
- DECL_ACCESSORS(name_or_scope_info, Object)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(name_or_scope_info, Object)
// [outer scope info] The outer scope info, needed to lazily parse this
// function.
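
function_data, name_or_scope_info and script_or_debug_info now go through DECL_RELEASE_ACQUIRE_ACCESSORS, and the call sites above consistently pair kAcquireLoad reads with kReleaseStore writes; the comment in ShouldFlushBytecode spells out why, since that path can run on the concurrent marker thread. The memory-ordering contract, sketched with std::atomic rather than V8's tagged fields:

#include <atomic>
#include <cassert>
#include <thread>

// A release store publishes earlier plain writes; an acquire load that sees
// the stored value is guaranteed to see those writes too.
struct Published {
  int payload = 0;
  std::atomic<bool> ready{false};
};

int main() {
  Published p;
  std::thread writer([&] {
    p.payload = 42;                                  // plain write
    p.ready.store(true, std::memory_order_release);  // release store
  });
  std::thread reader([&] {
    while (!p.ready.load(std::memory_order_acquire)) {
      // spin until the acquire load observes the release store
    }
    assert(p.payload == 42);  // visible thanks to the acquire/release pairing
  });
  writer.join();
  reader.join();
}

The same pairing is what lets a background reader of function_data see a consistent object once it observes the newly stored pointer.
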
diff --git a/deps/v8/src/objects/shared-function-info.tq b/deps/v8/src/objects/shared-function-info.tq
index 17ec1f2fea..838703454c 100644
--- a/deps/v8/src/objects/shared-function-info.tq
+++ b/deps/v8/src/objects/shared-function-info.tq
@@ -37,7 +37,6 @@ bitfield struct SharedFunctionInfoFlags extends uint32 {
has_reported_binary_coverage: bool: 1 bit;
is_top_level: bool: 1 bit;
is_oneshot_iife_or_properties_are_final: bool: 1 bit;
- is_safe_to_skip_arguments_adaptor: bool: 1 bit;
private_name_lookup_skips_outer_class: bool: 1 bit;
}
diff --git a/deps/v8/src/objects/slots-inl.h b/deps/v8/src/objects/slots-inl.h
index bd243cd8ba..2943c117c7 100644
--- a/deps/v8/src/objects/slots-inl.h
+++ b/deps/v8/src/objects/slots-inl.h
@@ -31,7 +31,7 @@ bool FullObjectSlot::contains_value(Address raw_value) const {
Object FullObjectSlot::operator*() const { return Object(*location()); }
-Object FullObjectSlot::load(const Isolate* isolate) const { return **this; }
+Object FullObjectSlot::load(IsolateRoot isolate) const { return **this; }
void FullObjectSlot::store(Object value) const { *location() = value.ptr(); }
@@ -39,7 +39,7 @@ Object FullObjectSlot::Acquire_Load() const {
return Object(base::AsAtomicPointer::Acquire_Load(location()));
}
-Object FullObjectSlot::Acquire_Load(const Isolate* isolate) const {
+Object FullObjectSlot::Acquire_Load(IsolateRoot isolate) const {
return Acquire_Load();
}
@@ -47,7 +47,7 @@ Object FullObjectSlot::Relaxed_Load() const {
return Object(base::AsAtomicPointer::Relaxed_Load(location()));
}
-Object FullObjectSlot::Relaxed_Load(const Isolate* isolate) const {
+Object FullObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
return Relaxed_Load();
}
@@ -79,7 +79,7 @@ MaybeObject FullMaybeObjectSlot::operator*() const {
return MaybeObject(*location());
}
-MaybeObject FullMaybeObjectSlot::load(const Isolate* isolate) const {
+MaybeObject FullMaybeObjectSlot::load(IsolateRoot isolate) const {
return **this;
}
@@ -91,7 +91,7 @@ MaybeObject FullMaybeObjectSlot::Relaxed_Load() const {
return MaybeObject(base::AsAtomicPointer::Relaxed_Load(location()));
}
-MaybeObject FullMaybeObjectSlot::Relaxed_Load(const Isolate* isolate) const {
+MaybeObject FullMaybeObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
return Relaxed_Load();
}
@@ -113,7 +113,7 @@ HeapObjectReference FullHeapObjectSlot::operator*() const {
return HeapObjectReference(*location());
}
-HeapObjectReference FullHeapObjectSlot::load(const Isolate* isolate) const {
+HeapObjectReference FullHeapObjectSlot::load(IsolateRoot isolate) const {
return **this;
}
diff --git a/deps/v8/src/objects/slots.h b/deps/v8/src/objects/slots.h
index cb726eba46..2221fb41c8 100644
--- a/deps/v8/src/objects/slots.h
+++ b/deps/v8/src/objects/slots.h
@@ -110,13 +110,13 @@ class FullObjectSlot : public SlotBase<FullObjectSlot, Address> {
inline bool contains_value(Address raw_value) const;
inline Object operator*() const;
- inline Object load(const Isolate* isolate) const;
+ inline Object load(IsolateRoot isolate) const;
inline void store(Object value) const;
inline Object Acquire_Load() const;
- inline Object Acquire_Load(const Isolate* isolate) const;
+ inline Object Acquire_Load(IsolateRoot isolate) const;
inline Object Relaxed_Load() const;
- inline Object Relaxed_Load(const Isolate* isolate) const;
+ inline Object Relaxed_Load(IsolateRoot isolate) const;
inline void Relaxed_Store(Object value) const;
inline void Release_Store(Object value) const;
inline Object Relaxed_CompareAndSwap(Object old, Object target) const;
@@ -147,11 +147,11 @@ class FullMaybeObjectSlot
: SlotBase(slot.address()) {}
inline MaybeObject operator*() const;
- inline MaybeObject load(const Isolate* isolate) const;
+ inline MaybeObject load(IsolateRoot isolate) const;
inline void store(MaybeObject value) const;
inline MaybeObject Relaxed_Load() const;
- inline MaybeObject Relaxed_Load(const Isolate* isolate) const;
+ inline MaybeObject Relaxed_Load(IsolateRoot isolate) const;
inline void Relaxed_Store(MaybeObject value) const;
inline void Release_CompareAndSwap(MaybeObject old, MaybeObject target) const;
};
@@ -174,7 +174,7 @@ class FullHeapObjectSlot : public SlotBase<FullHeapObjectSlot, Address> {
: SlotBase(slot.address()) {}
inline HeapObjectReference operator*() const;
- inline HeapObjectReference load(const Isolate* isolate) const;
+ inline HeapObjectReference load(IsolateRoot isolate) const;
inline void store(HeapObjectReference value) const;
inline HeapObject ToHeapObject() const;
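
The slot load signatures switch from const Isolate* to IsolateRoot. Reading between the lines of this diff, IsolateRoot appears to carry just the base address needed to turn a compressed 32-bit tagged value back into a full pointer; that interpretation is an inference, so the sketch below is a standalone analogy rather than V8 code:

#include <cassert>
#include <cstdint>

// With pointer compression, a slot stores a 32-bit offset and the full address
// is recovered by adding the cage/root base. Names here are illustrative.
struct CageBase {
  uint64_t base;  // aligned start of the pointer-compression region
};

inline uint64_t Decompress(CageBase cage, uint32_t compressed) {
  return cage.base + compressed;  // full address = base + 32-bit offset
}

int main() {
  CageBase cage{0x100000000ull};  // hypothetical 4 GiB-aligned base
  uint32_t slot_value = 0x4010;   // what a compressed slot might hold
  assert(Decompress(cage, slot_value) == 0x100004010ull);
}

Passing such a lightweight root value instead of a full const Isolate* presumably lets decompression-only callers avoid depending on the whole Isolate, which fits the mechanical nature of this rename.
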
diff --git a/deps/v8/src/objects/source-text-module-inl.h b/deps/v8/src/objects/source-text-module-inl.h
new file mode 100644
index 0000000000..20c80a1799
--- /dev/null
+++ b/deps/v8/src/objects/source-text-module-inl.h
@@ -0,0 +1,29 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SOURCE_TEXT_MODULE_INL_H_
+#define V8_OBJECTS_SOURCE_TEXT_MODULE_INL_H_
+
+#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/source-text-module.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/source-text-module-tq-inl.inc"
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(ModuleRequest)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SourceTextModule)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SourceTextModuleInfoEntry)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SOURCE_TEXT_MODULE_INL_H_
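
The new inline header follows the include discipline used throughout these files: object-macros.h comes last because it deliberately has no include guard, and object-macros-undef.h at the bottom removes the helper macros so they do not leak into other headers. The idea in miniature, with made-up macro and file names collapsed into one translation unit:

// ---- helper-macros.h (sketch; no include guard on purpose) ----
#define DECL_SIMPLE_GETTER(name, type) type name() const;

// ---- a header that uses the helpers ----
class Example {
 public:
  DECL_SIMPLE_GETTER(length, int)  // expands to: int length() const;
};

// ---- helper-macros-undef.h ----
#undef DECL_SIMPLE_GETTER

int main() {
  Example e;
  (void)e;  // Example::length is declared but never called here
}
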
diff --git a/deps/v8/src/objects/source-text-module.cc b/deps/v8/src/objects/source-text-module.cc
index fb29c18e2f..e3c7a2d6cd 100644
--- a/deps/v8/src/objects/source-text-module.cc
+++ b/deps/v8/src/objects/source-text-module.cc
@@ -189,7 +189,7 @@ MaybeHandle<Cell> SourceTextModule::ResolveExport(
} else if (name_set->count(export_name)) {
// Cycle detected.
if (must_resolve) {
- return isolate->Throw<Cell>(
+ return isolate->ThrowAt<Cell>(
isolate->factory()->NewSyntaxError(
MessageTemplate::kCyclicModuleDependency, export_name,
module_specifier),
@@ -234,16 +234,20 @@ MaybeHandle<Cell> SourceTextModule::ResolveExport(
MaybeHandle<Cell> SourceTextModule::ResolveImport(
Isolate* isolate, Handle<SourceTextModule> module, Handle<String> name,
- int module_request, MessageLocation loc, bool must_resolve,
+ int module_request_index, MessageLocation loc, bool must_resolve,
Module::ResolveSet* resolve_set) {
Handle<Module> requested_module(
- Module::cast(module->requested_modules().get(module_request)), isolate);
- Handle<String> specifier(
- String::cast(module->info().module_requests().get(module_request)),
+ Module::cast(module->requested_modules().get(module_request_index)),
+ isolate);
+ Handle<ModuleRequest> module_request(
+ ModuleRequest::cast(
+ module->info().module_requests().get(module_request_index)),
isolate);
+ Handle<String> module_specifier(String::cast(module_request->specifier()),
+ isolate);
MaybeHandle<Cell> result =
- Module::ResolveExport(isolate, requested_module, specifier, name, loc,
- must_resolve, resolve_set);
+ Module::ResolveExport(isolate, requested_module, module_specifier, name,
+ loc, must_resolve, resolve_set);
DCHECK_IMPLIES(isolate->has_pending_exception(), result.is_null());
return result;
}
@@ -274,10 +278,10 @@ MaybeHandle<Cell> SourceTextModule::ResolveExportUsingStarExports(
.ToHandle(&cell)) {
if (unique_cell.is_null()) unique_cell = cell;
if (*unique_cell != *cell) {
- return isolate->Throw<Cell>(isolate->factory()->NewSyntaxError(
- MessageTemplate::kAmbiguousExport,
- module_specifier, export_name),
- &loc);
+ return isolate->ThrowAt<Cell>(isolate->factory()->NewSyntaxError(
+ MessageTemplate::kAmbiguousExport,
+ module_specifier, export_name),
+ &loc);
}
} else if (isolate->has_pending_exception()) {
return MaybeHandle<Cell>();
@@ -296,7 +300,7 @@ MaybeHandle<Cell> SourceTextModule::ResolveExportUsingStarExports(
// Unresolvable.
if (must_resolve) {
- return isolate->Throw<Cell>(
+ return isolate->ThrowAt<Cell>(
isolate->factory()->NewSyntaxError(MessageTemplate::kUnresolvableExport,
module_specifier, export_name),
&loc);
@@ -312,7 +316,10 @@ bool SourceTextModule::PrepareInstantiate(
Handle<FixedArray> module_requests(module_info->module_requests(), isolate);
Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
for (int i = 0, length = module_requests->length(); i < length; ++i) {
- Handle<String> specifier(String::cast(module_requests->get(i)), isolate);
+ Handle<ModuleRequest> module_request(
+ ModuleRequest::cast(module_requests->get(i)), isolate);
+ Handle<String> specifier(module_request->specifier(), isolate);
+ // TODO(v8:10958) Pass import assertions to the callback
v8::Local<v8::Module> api_requested_module;
if (!callback(context, v8::Utils::ToLocal(specifier),
v8::Utils::ToLocal(Handle<Module>::cast(module)))
diff --git a/deps/v8/src/objects/source-text-module.h b/deps/v8/src/objects/source-text-module.h
index 7d79213f13..c3ef4e36b3 100644
--- a/deps/v8/src/objects/source-text-module.h
+++ b/deps/v8/src/objects/source-text-module.h
@@ -17,6 +17,8 @@ namespace internal {
class UnorderedModuleSet;
+#include "torque-generated/src/objects/source-text-module-tq.inc"
+
// The runtime representation of an ECMAScript Source Text Module Record.
// https://tc39.github.io/ecma262/#sec-source-text-module-records
class SourceTextModule
@@ -124,7 +126,7 @@ class SourceTextModule
MessageLocation loc, bool must_resolve, ResolveSet* resolve_set);
static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveImport(
Isolate* isolate, Handle<SourceTextModule> module, Handle<String> name,
- int module_request, MessageLocation loc, bool must_resolve,
+ int module_request_index, MessageLocation loc, bool must_resolve,
ResolveSet* resolve_set);
static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
@@ -236,6 +238,20 @@ class SourceTextModuleInfo : public FixedArray {
OBJECT_CONSTRUCTORS(SourceTextModuleInfo, FixedArray);
};
+class ModuleRequest
+ : public TorqueGeneratedModuleRequest<ModuleRequest, Struct> {
+ public:
+ NEVER_READ_ONLY_SPACE
+ DECL_VERIFIER(ModuleRequest)
+
+ template <typename LocalIsolate>
+ static Handle<ModuleRequest> New(LocalIsolate* isolate,
+ Handle<String> specifier,
+ Handle<FixedArray> import_assertions);
+
+ TQ_OBJECT_CONSTRUCTORS(ModuleRequest)
+};
+
class SourceTextModuleInfoEntry
: public TorqueGeneratedSourceTextModuleInfoEntry<SourceTextModuleInfoEntry,
Struct> {
diff --git a/deps/v8/src/objects/source-text-module.tq b/deps/v8/src/objects/source-text-module.tq
index 185443414d..1fee28a31f 100644
--- a/deps/v8/src/objects/source-text-module.tq
+++ b/deps/v8/src/objects/source-text-module.tq
@@ -48,6 +48,16 @@ extern class SourceTextModule extends Module {
}
@generateCppClass
+@generatePrint
+extern class ModuleRequest extends Struct {
+ specifier: String;
+
+ // Import assertions are stored in this array in the form:
+ // [key1, value1, location1, key2, value2, location2, ...]
+ import_assertions: FixedArray;
+}
+
+@generateCppClass
extern class SourceTextModuleInfoEntry extends Struct {
export_name: String|Undefined;
local_name: String|Undefined;
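
The Torque class stores import assertions flattened into a single array in the [key1, value1, location1, key2, value2, location2, ...] order spelled out in the comment. A plain C++ walk over that triple layout, with std::variant standing in for the heterogeneous tagged array (none of this is V8 API):

#include <cstddef>
#include <cstdio>
#include <string>
#include <variant>
#include <vector>

using Slot = std::variant<std::string, int>;  // string keys/values, Smi-like ints

// Reads [key, value, location] triples out of a flat array.
void PrintAssertions(const std::vector<Slot>& import_assertions) {
  constexpr std::size_t kEntrySize = 3;
  for (std::size_t i = 0; i + kEntrySize <= import_assertions.size();
       i += kEntrySize) {
    const std::string& key = std::get<std::string>(import_assertions[i]);
    const std::string& value = std::get<std::string>(import_assertions[i + 1]);
    int location = std::get<int>(import_assertions[i + 2]);
    std::printf("assert %s: %s (source offset %d)\n", key.c_str(),
                value.c_str(), location);
  }
}

int main() {
  PrintAssertions({std::string("type"), std::string("json"), 17});
}
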
diff --git a/deps/v8/src/objects/stack-frame-info-inl.h b/deps/v8/src/objects/stack-frame-info-inl.h
index 820d4324a2..376eda3a65 100644
--- a/deps/v8/src/objects/stack-frame-info-inl.h
+++ b/deps/v8/src/objects/stack-frame-info-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/stack-frame-info-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(StackFrameInfo)
NEVER_READ_ONLY_SPACE_IMPL(StackFrameInfo)
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
index 2f07c75ecf..6fe5316631 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -196,6 +196,9 @@ Handle<StackFrameInfo> StackTraceFrame::GetFrameInfo(
// static
void StackTraceFrame::InitializeFrameInfo(Handle<StackTraceFrame> frame) {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
+ "SymbolizeStackFrame", "frameIndex", frame->frame_index());
+
Isolate* isolate = frame->GetIsolate();
Handle<StackFrameInfo> frame_info = isolate->factory()->NewStackFrameInfo(
handle(FrameArray::cast(frame->frame_array()), isolate),
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index 837c7e4b30..a2802792fd 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -17,6 +17,8 @@ namespace internal {
class FrameArray;
class WasmInstanceObject;
+#include "torque-generated/src/objects/stack-frame-info-tq.inc"
+
class StackFrameInfo
: public TorqueGeneratedStackFrameInfo<StackFrameInfo, Struct> {
public:
diff --git a/deps/v8/src/objects/string-comparator.cc b/deps/v8/src/objects/string-comparator.cc
index 6f517edb20..79ec348c71 100644
--- a/deps/v8/src/objects/string-comparator.cc
+++ b/deps/v8/src/objects/string-comparator.cc
@@ -44,7 +44,7 @@ bool StringComparator::Equals(String string_1, String string_2) {
state_1_.Init(string_1);
state_2_.Init(string_2);
while (true) {
- int to_check = Min(state_1_.length_, state_2_.length_);
+ int to_check = std::min(state_1_.length_, state_2_.length_);
DCHECK(to_check > 0 && to_check <= length);
bool is_equal;
if (state_1_.is_one_byte_) {
diff --git a/deps/v8/src/objects/string-comparator.h b/deps/v8/src/objects/string-comparator.h
index 8cee98a642..dc58d9aeb2 100644
--- a/deps/v8/src/objects/string-comparator.h
+++ b/deps/v8/src/objects/string-comparator.h
@@ -55,6 +55,8 @@ class StringComparator {
class State {
public:
State() : is_one_byte_(true), length_(0), buffer8_(nullptr) {}
+ State(const State&) = delete;
+ State& operator=(const State&) = delete;
void Init(String string);
@@ -79,13 +81,12 @@ class StringComparator {
const uint8_t* buffer8_;
const uint16_t* buffer16_;
};
-
- private:
- DISALLOW_COPY_AND_ASSIGN(State);
};
public:
inline StringComparator() = default;
+ StringComparator(const StringComparator&) = delete;
+ StringComparator& operator=(const StringComparator&) = delete;
template <typename Chars1, typename Chars2>
static inline bool Equals(State* state_1, State* state_2, int to_check) {
@@ -99,8 +100,6 @@ class StringComparator {
private:
State state_1_;
State state_2_;
-
- DISALLOW_COPY_AND_ASSIGN(StringComparator);
};
} // namespace internal
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index e9dfc594d1..ee1afd23a4 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -24,14 +24,53 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/string-tq-inl.inc"
+
+// Creates a SharedMutexGuard<kShared> for the string access if:
+// A) {str} is not a read only string, and
+// B) We are on a background thread.
+class SharedStringAccessGuardIfNeeded {
+ public:
+ explicit SharedStringAccessGuardIfNeeded(String str) {
+ Isolate* isolate;
+ if (IsNeeded(str, &isolate)) mutex_guard.emplace(isolate->string_access());
+ }
+
+ static SharedStringAccessGuardIfNeeded NotNeeded() {
+ return SharedStringAccessGuardIfNeeded();
+ }
+
+ static bool IsNeeded(String str, Isolate** out_isolate = nullptr) {
+ Isolate* isolate;
+ if (!GetIsolateFromHeapObject(str, &isolate)) {
+ // If we can't get the isolate from the String, it must be read-only.
+ DCHECK(ReadOnlyHeap::Contains(str));
+ return false;
+ }
+ if (out_isolate) *out_isolate = isolate;
+ return ThreadId::Current() != isolate->thread_id();
+ }
+
+ private:
+ // Default constructor and move constructor required for the NotNeeded()
+ // static constructor.
+ constexpr SharedStringAccessGuardIfNeeded() = default;
+ constexpr SharedStringAccessGuardIfNeeded(SharedStringAccessGuardIfNeeded&&)
+ V8_NOEXCEPT {
+ DCHECK(!mutex_guard.has_value());
+ }
+
+ base::Optional<base::SharedMutexGuard<base::kShared>> mutex_guard;
+};
+
int String::synchronized_length() const {
return base::AsAtomic32::Acquire_Load(
- reinterpret_cast<const int32_t*>(FIELD_ADDR(*this, kLengthOffset)));
+ reinterpret_cast<const int32_t*>(field_address(kLengthOffset)));
}
void String::synchronized_set_length(int value) {
base::AsAtomic32::Release_Store(
- reinterpret_cast<int32_t*>(FIELD_ADDR(*this, kLengthOffset)), value);
+ reinterpret_cast<int32_t*>(field_address(kLengthOffset)), value);
}
TQ_OBJECT_CONSTRUCTORS_IMPL(String)
@@ -50,7 +89,8 @@ CAST_ACCESSOR(ExternalOneByteString)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalTwoByteString)
-StringShape::StringShape(const String str) : type_(str.map().instance_type()) {
+StringShape::StringShape(const String str)
+ : type_(str.synchronized_map().instance_type()) {
set_valid();
DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
@@ -237,7 +277,7 @@ uc32 FlatStringReader::Get(int index) {
template <typename Char>
Char FlatStringReader::Get(int index) {
DCHECK_EQ(is_one_byte_, sizeof(Char) == 1);
- DCHECK(0 <= index && index <= length_);
+ DCHECK(0 <= index && index < length_);
if (sizeof(Char) == 1) {
return static_cast<Char>(static_cast<const uint8_t*>(start_)[index]);
} else {
@@ -261,12 +301,13 @@ class SequentialStringKey final : public StringTableKey {
convert_(convert) {}
bool IsMatch(String s) override {
+ SharedStringAccessGuardIfNeeded access_guard(s);
DisallowHeapAllocation no_gc;
if (s.IsOneByteRepresentation()) {
- const uint8_t* chars = s.GetChars<uint8_t>(no_gc);
+ const uint8_t* chars = s.GetChars<uint8_t>(no_gc, access_guard);
return CompareChars(chars, chars_.begin(), chars_.length()) == 0;
}
- const uint16_t* chars = s.GetChars<uint16_t>(no_gc);
+ const uint16_t* chars = s.GetChars<uint16_t>(no_gc, access_guard);
return CompareChars(chars, chars_.begin(), chars_.length()) == 0;
}
@@ -392,6 +433,16 @@ const Char* String::GetChars(const DisallowHeapAllocation& no_gc) {
: CharTraits<Char>::String::cast(*this).GetChars(no_gc);
}
+template <typename Char>
+const Char* String::GetChars(
+ const DisallowHeapAllocation& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard) {
+ return StringShape(*this).IsExternal()
+ ? CharTraits<Char>::ExternalString::cast(*this).GetChars()
+ : CharTraits<Char>::String::cast(*this).GetChars(no_gc,
+ access_guard);
+}
+
Handle<String> String::Flatten(Isolate* isolate, Handle<String> string,
AllocationType allocation) {
if (string->IsConsString()) {
@@ -419,6 +470,8 @@ Handle<String> String::Flatten(LocalIsolate* isolate, Handle<String> string,
uint16_t String::Get(int index) {
DCHECK(index >= 0 && index < length());
+ SharedStringAccessGuardIfNeeded scope(*this);
+
class StringGetDispatcher : public AllStatic {
public:
#define DEFINE_METHOD(Type) \
@@ -554,21 +607,39 @@ void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
}
Address SeqOneByteString::GetCharsAddress() {
- return FIELD_ADDR(*this, kHeaderSize);
+ return field_address(kHeaderSize);
}
uint8_t* SeqOneByteString::GetChars(const DisallowHeapAllocation& no_gc) {
USE(no_gc);
+ DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
+ return reinterpret_cast<uint8_t*>(GetCharsAddress());
+}
+
+uint8_t* SeqOneByteString::GetChars(
+ const DisallowHeapAllocation& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard) {
+ USE(no_gc);
+ USE(access_guard);
return reinterpret_cast<uint8_t*>(GetCharsAddress());
}
Address SeqTwoByteString::GetCharsAddress() {
- return FIELD_ADDR(*this, kHeaderSize);
+ return field_address(kHeaderSize);
}
uc16* SeqTwoByteString::GetChars(const DisallowHeapAllocation& no_gc) {
USE(no_gc);
- return reinterpret_cast<uc16*>(FIELD_ADDR(*this, kHeaderSize));
+ DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
+ return reinterpret_cast<uc16*>(GetCharsAddress());
+}
+
+uc16* SeqTwoByteString::GetChars(
+ const DisallowHeapAllocation& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard) {
+ USE(no_gc);
+ USE(access_guard);
+ return reinterpret_cast<uc16*>(GetCharsAddress());
}
uint16_t SeqTwoByteString::Get(int index) {
@@ -612,17 +683,20 @@ bool ExternalString::is_uncached() const {
return (type & kUncachedExternalStringMask) == kUncachedExternalStringTag;
}
+void ExternalString::AllocateExternalPointerEntries(Isolate* isolate) {
+ InitExternalPointerField(kResourceOffset, isolate);
+ if (is_uncached()) return;
+ InitExternalPointerField(kResourceDataOffset, isolate);
+}
+
DEF_GETTER(ExternalString, resource_as_address, Address) {
- ExternalPointer_t encoded_address =
- ReadField<ExternalPointer_t>(kResourceOffset);
- return DecodeExternalPointer(isolate, encoded_address);
+ return ReadExternalPointerField(kResourceOffset, isolate,
+ kExternalStringResourceTag);
}
-void ExternalString::set_address_as_resource(Isolate* isolate,
- Address address) {
- const ExternalPointer_t encoded_address =
- EncodeExternalPointer(isolate, address);
- WriteField<ExternalPointer_t>(kResourceOffset, encoded_address);
+void ExternalString::set_address_as_resource(Isolate* isolate, Address value) {
+ WriteExternalPointerField(kResourceOffset, isolate, value,
+ kExternalStringResourceTag);
if (IsExternalOneByteString()) {
ExternalOneByteString::cast(*this).update_data_cache(isolate);
} else {
@@ -630,48 +704,43 @@ void ExternalString::set_address_as_resource(Isolate* isolate,
}
}
-uint32_t ExternalString::resource_as_uint32() {
+uint32_t ExternalString::GetResourceRefForDeserialization() {
ExternalPointer_t encoded_address =
ReadField<ExternalPointer_t>(kResourceOffset);
return static_cast<uint32_t>(encoded_address);
}
-void ExternalString::set_uint32_as_resource(Isolate* isolate, uint32_t value) {
- WriteField<ExternalPointer_t>(kResourceOffset, value);
+void ExternalString::SetResourceRefForSerialization(uint32_t ref) {
+ WriteField<ExternalPointer_t>(kResourceOffset,
+ static_cast<ExternalPointer_t>(ref));
if (is_uncached()) return;
- WriteField<ExternalPointer_t>(kResourceDataOffset,
- EncodeExternalPointer(isolate, kNullAddress));
+ WriteField<ExternalPointer_t>(kResourceDataOffset, kNullExternalPointer);
}
void ExternalString::DisposeResource(Isolate* isolate) {
- const ExternalPointer_t encoded_address =
- ReadField<ExternalPointer_t>(kResourceOffset);
+ Address value = ReadExternalPointerField(kResourceOffset, isolate,
+ kExternalStringResourceTag);
v8::String::ExternalStringResourceBase* resource =
- reinterpret_cast<v8::String::ExternalStringResourceBase*>(
- DecodeExternalPointer(isolate, encoded_address));
+ reinterpret_cast<v8::String::ExternalStringResourceBase*>(value);
// Dispose of the C++ object if it has not already been disposed.
if (resource != nullptr) {
resource->Dispose();
- const ExternalPointer_t encoded_address =
- EncodeExternalPointer(isolate, kNullAddress);
- WriteField<ExternalPointer_t>(kResourceOffset, encoded_address);
+ WriteExternalPointerField(kResourceOffset, isolate, kNullAddress,
+ kExternalStringResourceTag);
}
}
DEF_GETTER(ExternalOneByteString, resource,
const ExternalOneByteString::Resource*) {
- const ExternalPointer_t encoded_address =
- ReadField<ExternalPointer_t>(kResourceOffset);
- return reinterpret_cast<Resource*>(
- DecodeExternalPointer(isolate, encoded_address));
+ return reinterpret_cast<Resource*>(resource_as_address(isolate));
}
void ExternalOneByteString::update_data_cache(Isolate* isolate) {
if (is_uncached()) return;
- const ExternalPointer_t encoded_resource_data = EncodeExternalPointer(
- isolate, reinterpret_cast<Address>(resource()->data()));
- WriteField<ExternalPointer_t>(kResourceDataOffset, encoded_resource_data);
+ WriteExternalPointerField(kResourceDataOffset, isolate,
+ reinterpret_cast<Address>(resource()->data()),
+ kExternalStringResourceDataTag);
}
void ExternalOneByteString::SetResource(
@@ -685,9 +754,9 @@ void ExternalOneByteString::SetResource(
void ExternalOneByteString::set_resource(
Isolate* isolate, const ExternalOneByteString::Resource* resource) {
- const ExternalPointer_t encoded_address =
- EncodeExternalPointer(isolate, reinterpret_cast<Address>(resource));
- WriteField<ExternalPointer_t>(kResourceOffset, encoded_address);
+ WriteExternalPointerField(kResourceOffset, isolate,
+ reinterpret_cast<Address>(resource),
+ kExternalStringResourceTag);
if (resource != nullptr) update_data_cache(isolate);
}
@@ -702,17 +771,14 @@ uint8_t ExternalOneByteString::Get(int index) {
DEF_GETTER(ExternalTwoByteString, resource,
const ExternalTwoByteString::Resource*) {
- const ExternalPointer_t encoded_address =
- ReadField<ExternalPointer_t>(kResourceOffset);
- return reinterpret_cast<Resource*>(
- DecodeExternalPointer(isolate, encoded_address));
+ return reinterpret_cast<Resource*>(resource_as_address(isolate));
}
void ExternalTwoByteString::update_data_cache(Isolate* isolate) {
if (is_uncached()) return;
- const ExternalPointer_t encoded_resource_data = EncodeExternalPointer(
- isolate, reinterpret_cast<Address>(resource()->data()));
- WriteField<ExternalPointer_t>(kResourceDataOffset, encoded_resource_data);
+ WriteExternalPointerField(kResourceDataOffset, isolate,
+ reinterpret_cast<Address>(resource()->data()),
+ kExternalStringResourceDataTag);
}
void ExternalTwoByteString::SetResource(
@@ -726,9 +792,9 @@ void ExternalTwoByteString::SetResource(
void ExternalTwoByteString::set_resource(
Isolate* isolate, const ExternalTwoByteString::Resource* resource) {
- const ExternalPointer_t encoded_address =
- EncodeExternalPointer(isolate, reinterpret_cast<Address>(resource));
- WriteField<ExternalPointer_t>(kResourceOffset, encoded_address);
+ WriteExternalPointerField(kResourceOffset, isolate,
+ reinterpret_cast<Address>(resource),
+ kExternalStringResourceTag);
if (resource != nullptr) update_data_cache(isolate);
}
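
The guard introduced above acquires isolate->string_access() in shared mode only when the string is mutable and the caller is off the main thread; for read-only strings or main-thread callers it is a no-op. A minimal usage sketch mirroring the SequentialStringKey::IsMatch change (the helper name is illustrative, not part of the patch):

    bool MatchesOneByte(String s, Vector<const uint8_t> expected) {
      SharedStringAccessGuardIfNeeded access_guard(s);  // locks only off the main thread
      DisallowHeapAllocation no_gc;
      if (!s.IsOneByteRepresentation()) return false;
      const uint8_t* chars = s.GetChars<uint8_t>(no_gc, access_guard);
      return CompareChars(chars, expected.begin(), expected.length()) == 0;
    }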
diff --git a/deps/v8/src/objects/string-table.cc b/deps/v8/src/objects/string-table.cc
index ae8da8412c..85f31a2e56 100644
--- a/deps/v8/src/objects/string-table.cc
+++ b/deps/v8/src/objects/string-table.cc
@@ -51,7 +51,7 @@ int ComputeStringTableCapacity(int at_least_space_for) {
// See matching computation in StringTableHasSufficientCapacityToAdd().
int raw_capacity = at_least_space_for + (at_least_space_for >> 1);
int capacity = base::bits::RoundUpToPowerOfTwo32(raw_capacity);
- return Max(capacity, kStringTableMinCapacity);
+ return std::max(capacity, kStringTableMinCapacity);
}
int ComputeStringTableCapacityWithShrink(int current_capacity,
@@ -91,14 +91,14 @@ bool KeyIsMatch(StringTableKey* key, String string) {
class StringTable::Data {
public:
static std::unique_ptr<Data> New(int capacity);
- static std::unique_ptr<Data> Resize(const Isolate* isolate,
+ static std::unique_ptr<Data> Resize(IsolateRoot isolate,
std::unique_ptr<Data> data, int capacity);
OffHeapObjectSlot slot(InternalIndex index) const {
return OffHeapObjectSlot(&elements_[index.as_uint32()]);
}
- Object Get(const Isolate* isolate, InternalIndex index) const {
+ Object Get(IsolateRoot isolate, InternalIndex index) const {
return slot(index).Acquire_Load(isolate);
}
@@ -136,13 +136,13 @@ class StringTable::Data {
int number_of_deleted_elements() const { return number_of_deleted_elements_; }
template <typename StringTableKey>
- InternalIndex FindEntry(const Isolate* isolate, StringTableKey* key,
+ InternalIndex FindEntry(IsolateRoot isolate, StringTableKey* key,
uint32_t hash) const;
- InternalIndex FindInsertionEntry(const Isolate* isolate, uint32_t hash) const;
+ InternalIndex FindInsertionEntry(IsolateRoot isolate, uint32_t hash) const;
template <typename StringTableKey>
- InternalIndex FindEntryOrInsertionEntry(const Isolate* isolate,
+ InternalIndex FindEntryOrInsertionEntry(IsolateRoot isolate,
StringTableKey* key,
uint32_t hash) const;
@@ -157,7 +157,7 @@ class StringTable::Data {
Data* PreviousData() { return previous_data_.get(); }
void DropPreviousData() { previous_data_.reset(); }
- void Print(const Isolate* isolate) const;
+ void Print(IsolateRoot isolate) const;
size_t GetCurrentMemoryUsage() const;
private:
@@ -224,7 +224,7 @@ std::unique_ptr<StringTable::Data> StringTable::Data::New(int capacity) {
}
std::unique_ptr<StringTable::Data> StringTable::Data::Resize(
- const Isolate* isolate, std::unique_ptr<Data> data, int capacity) {
+ IsolateRoot isolate, std::unique_ptr<Data> data, int capacity) {
std::unique_ptr<Data> new_data(new (capacity) Data(capacity));
DCHECK_LT(data->number_of_elements(), new_data->capacity());
@@ -248,7 +248,7 @@ std::unique_ptr<StringTable::Data> StringTable::Data::Resize(
}
template <typename StringTableKey>
-InternalIndex StringTable::Data::FindEntry(const Isolate* isolate,
+InternalIndex StringTable::Data::FindEntry(IsolateRoot isolate,
StringTableKey* key,
uint32_t hash) const {
uint32_t count = 1;
@@ -266,7 +266,7 @@ InternalIndex StringTable::Data::FindEntry(const Isolate* isolate,
}
}
-InternalIndex StringTable::Data::FindInsertionEntry(const Isolate* isolate,
+InternalIndex StringTable::Data::FindInsertionEntry(IsolateRoot isolate,
uint32_t hash) const {
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
@@ -283,7 +283,7 @@ InternalIndex StringTable::Data::FindInsertionEntry(const Isolate* isolate,
template <typename StringTableKey>
InternalIndex StringTable::Data::FindEntryOrInsertionEntry(
- const Isolate* isolate, StringTableKey* key, uint32_t hash) const {
+ IsolateRoot isolate, StringTableKey* key, uint32_t hash) const {
InternalIndex insertion_entry = InternalIndex::NotFound();
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
@@ -317,7 +317,7 @@ void StringTable::Data::IterateElements(RootVisitor* visitor) {
visitor->VisitRootPointers(Root::kStringTable, nullptr, first_slot, end_slot);
}
-void StringTable::Data::Print(const Isolate* isolate) const {
+void StringTable::Data::Print(IsolateRoot isolate) const {
OFStream os(stdout);
os << "StringTable {" << std::endl;
for (InternalIndex i : InternalIndex::Range(capacity_)) {
@@ -358,7 +358,10 @@ class InternalizedStringKey final : public StringTableKey {
set_hash_field(string->hash_field());
}
- bool IsMatch(String string) override { return string_->SlowEquals(string); }
+ bool IsMatch(String string) override {
+ DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(string));
+ return string_->SlowEquals(string);
+ }
Handle<String> AsHandle(Isolate* isolate) {
// Internalize the string if possible.
@@ -461,8 +464,6 @@ Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
// allocation if another write also did an allocation. This assumes that
// writes are rarer than reads.
- const Isolate* ptr_cmp_isolate = GetIsolateForPtrCompr(isolate);
-
Handle<String> new_string;
while (true) {
// Load the current string table data, in case another thread updates the
@@ -474,9 +475,9 @@ Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
// because the new table won't delete it's corresponding entry until the
// string is dead, in which case it will die in this table too and worst
// case we'll have a false miss.
- InternalIndex entry = data->FindEntry(ptr_cmp_isolate, key, key->hash());
+ InternalIndex entry = data->FindEntry(isolate, key, key->hash());
if (entry.is_found()) {
- return handle(String::cast(data->Get(ptr_cmp_isolate, entry)), isolate);
+ return handle(String::cast(data->Get(isolate, entry)), isolate);
}
// No entry found, so adding new string.
@@ -490,14 +491,14 @@ Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
{
base::MutexGuard table_write_guard(&write_mutex_);
- Data* data = EnsureCapacity(ptr_cmp_isolate, 1);
+ Data* data = EnsureCapacity(isolate, 1);
// Check one last time if the key is present in the table, in case it was
// added after the check.
InternalIndex entry =
- data->FindEntryOrInsertionEntry(ptr_cmp_isolate, key, key->hash());
+ data->FindEntryOrInsertionEntry(isolate, key, key->hash());
- Object element = data->Get(ptr_cmp_isolate, entry);
+ Object element = data->Get(isolate, entry);
if (element == empty_element()) {
// This entry is empty, so write it and register that we added an
// element.
@@ -539,7 +540,7 @@ template Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
template Handle<String> StringTable::LookupKey(Isolate* isolate,
StringTableInsertionKey* key);
-StringTable::Data* StringTable::EnsureCapacity(const Isolate* isolate,
+StringTable::Data* StringTable::EnsureCapacity(IsolateRoot isolate,
int additional_elements) {
// This call is only allowed while the write mutex is held.
write_mutex_.AssertHeld();
@@ -677,7 +678,7 @@ Address StringTable::TryStringToIndexOrLookupExisting(Isolate* isolate,
isolate, string, source, start);
}
-void StringTable::Print(const Isolate* isolate) const {
+void StringTable::Print(IsolateRoot isolate) const {
data_.load(std::memory_order_acquire)->Print(isolate);
}
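
A quick worked example of the sizing rule in ComputeStringTableCapacity earlier in this file's diff, using illustrative numbers only (the value of kStringTableMinCapacity is not restated here):

    //   at_least_space_for = 100
    //   raw_capacity       = 100 + (100 >> 1) = 150
    //   RoundUpToPowerOfTwo32(150)            = 256
    //   returned capacity  = std::max(256, kStringTableMinCapacity)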
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index 9efcc6e016..cdbb22db80 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -77,7 +77,7 @@ class V8_EXPORT_PRIVATE StringTable {
static Address TryStringToIndexOrLookupExisting(Isolate* isolate,
Address raw_string);
- void Print(const Isolate* isolate) const;
+ void Print(IsolateRoot isolate) const;
size_t GetCurrentMemoryUsage() const;
// The following methods must be called either while holding the write lock,
@@ -89,7 +89,7 @@ class V8_EXPORT_PRIVATE StringTable {
private:
class Data;
- Data* EnsureCapacity(const Isolate* isolate, int additional_elements);
+ Data* EnsureCapacity(IsolateRoot isolate, int additional_elements);
std::atomic<Data*> data_;
// Write mutex is mutable so that readers of concurrently mutated values (e.g.
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index c450485a1b..4c023f9801 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -6,6 +6,7 @@
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
+#include "src/execution/thread-id.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-chunk.h"
@@ -117,17 +118,16 @@ void String::MakeThin(Isolate* isolate, String internalized) {
bool has_pointers = StringShape(*this).IsIndirect();
int old_size = this->Size();
- // Slot invalidation is not necessary here: ThinString only stores tagged
- // value, so it can't store an untagged value in a recorded slot.
- isolate->heap()->NotifyObjectLayoutChange(*this, no_gc,
- InvalidateRecordedSlots::kNo);
bool one_byte = internalized.IsOneByteRepresentation();
Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
: isolate->factory()->thin_string_map();
+ // Update actual first and then do release store on the map word. This ensures
+ // that the concurrent marker will read the pointer when visiting a
+ // ThinString.
+ ThinString thin = ThinString::unchecked_cast(*this);
+ thin.set_actual(internalized);
DCHECK_GE(old_size, ThinString::kSize);
this->synchronized_set_map(*map);
- ThinString thin = ThinString::cast(*this);
- thin.set_actual(internalized);
Address thin_end = thin.address() + ThinString::kSize;
int size_delta = old_size - ThinString::kSize;
if (size_delta != 0) {
@@ -168,6 +168,11 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation,
InvalidateRecordedSlots::kYes);
}
+
+ // Disallow garbage collection to avoid possible GC vs string access deadlock.
+ DisallowGarbageCollection no_gc;
+ base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
+ isolate->string_access());
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
// string occupies is too small for a regular external string. Instead, we
@@ -198,6 +203,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
this->synchronized_set_map(new_map);
ExternalTwoByteString self = ExternalTwoByteString::cast(*this);
+ self.AllocateExternalPointerEntries(isolate);
self.SetResource(isolate, resource);
isolate->heap()->RegisterExternalString(*this);
if (is_internalized) self.Hash(); // Force regeneration of the hash value.
@@ -239,6 +245,11 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation,
InvalidateRecordedSlots::kYes);
}
+
+ // Disallow garbage collection to avoid possible GC vs string access deadlock.
+ DisallowGarbageCollection no_gc;
+ base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
+ isolate->string_access());
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
// string occupies is too small for a regular external string. Instead, we
@@ -268,6 +279,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
this->synchronized_set_map(new_map);
ExternalOneByteString self = ExternalOneByteString::cast(*this);
+ self.AllocateExternalPointerEntries(isolate);
self.SetResource(isolate, resource);
isolate->heap()->RegisterExternalString(*this);
if (is_internalized) self.Hash(); // Force regeneration of the hash value.
@@ -519,6 +531,15 @@ Handle<Object> String::ToNumber(Isolate* isolate, Handle<String> subject) {
String::FlatContent String::GetFlatContent(
const DisallowHeapAllocation& no_gc) {
+#if DEBUG
+ // Check that this method is called only from the main thread.
+ {
+ Isolate* isolate;
+ // We don't have to check read only strings as those won't move.
+ DCHECK_IMPLIES(GetIsolateFromHeapObject(*this, &isolate),
+ ThreadId::Current() == isolate->thread_id());
+ }
+#endif
USE(no_gc);
int length = this->length();
StringShape shape(*this);
@@ -527,7 +548,7 @@ String::FlatContent String::GetFlatContent(
if (shape.representation_tag() == kConsStringTag) {
ConsString cons = ConsString::cast(string);
if (cons.second().length() != 0) {
- return FlatContent();
+ return FlatContent(no_gc);
}
string = cons.first();
shape = StringShape(string);
@@ -553,7 +574,7 @@ String::FlatContent String::GetFlatContent(
} else {
start = ExternalOneByteString::cast(string).GetChars();
}
- return FlatContent(start + offset, length);
+ return FlatContent(start + offset, length, no_gc);
} else {
DCHECK_EQ(shape.encoding_tag(), kTwoByteStringTag);
const uc16* start;
@@ -562,7 +583,7 @@ String::FlatContent String::GetFlatContent(
} else {
start = ExternalTwoByteString::cast(string).GetChars();
}
- return FlatContent(start + offset, length);
+ return FlatContent(start + offset, length, no_gc);
}
}
@@ -618,11 +639,9 @@ std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
}
template <typename sinkchar>
-void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
+void String::WriteToFlat(String source, sinkchar* sink, int from, int to) {
DisallowHeapAllocation no_gc;
- String source = src;
- int from = f;
- int to = t;
+ SharedStringAccessGuardIfNeeded access_guard(source);
while (from < to) {
DCHECK_LE(0, from);
DCHECK_LE(to, source.length());
@@ -638,13 +657,17 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
return;
}
case kOneByteStringTag | kSeqStringTag: {
- CopyChars(sink, SeqOneByteString::cast(source).GetChars(no_gc) + from,
- to - from);
+ CopyChars(
+ sink,
+ SeqOneByteString::cast(source).GetChars(no_gc, access_guard) + from,
+ to - from);
return;
}
case kTwoByteStringTag | kSeqStringTag: {
- CopyChars(sink, SeqTwoByteString::cast(source).GetChars(no_gc) + from,
- to - from);
+ CopyChars(
+ sink,
+ SeqTwoByteString::cast(source).GetChars(no_gc, access_guard) + from,
+ to - from);
return;
}
case kOneByteStringTag | kConsStringTag:
@@ -677,9 +700,10 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
if (to - boundary == 1) {
sink[boundary - from] = static_cast<sinkchar>(second.Get(0));
} else if (second.IsSeqOneByteString()) {
- CopyChars(sink + boundary - from,
- SeqOneByteString::cast(second).GetChars(no_gc),
- to - boundary);
+ CopyChars(
+ sink + boundary - from,
+ SeqOneByteString::cast(second).GetChars(no_gc, access_guard),
+ to - boundary);
} else {
WriteToFlat(second, sink + boundary - from, 0, to - boundary);
}
@@ -1510,24 +1534,19 @@ int ExternalString::ExternalPayloadSize() const {
}
FlatStringReader::FlatStringReader(Isolate* isolate, Handle<String> str)
- : Relocatable(isolate), str_(str.location()), length_(str->length()) {
+ : Relocatable(isolate), str_(str), length_(str->length()) {
+#if DEBUG
+ // Check that this constructor is called only from the main thread.
+ DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
+#endif
PostGarbageCollection();
}
-FlatStringReader::FlatStringReader(Isolate* isolate, Vector<const char> input)
- : Relocatable(isolate),
- str_(nullptr),
- is_one_byte_(true),
- length_(input.length()),
- start_(input.begin()) {}
-
void FlatStringReader::PostGarbageCollection() {
- if (str_ == nullptr) return;
- Handle<String> str(str_);
- DCHECK(str->IsFlat());
+ DCHECK(str_->IsFlat());
DisallowHeapAllocation no_gc;
// This does not actually prevent the vector from being relocated later.
- String::FlatContent content = str->GetFlatContent(no_gc);
+ String::FlatContent content = str_->GetFlatContent(no_gc);
DCHECK(content.IsFlat());
is_one_byte_ = content.IsOneByte();
if (is_one_byte_) {
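
The reordering in String::MakeThin above (write thin.set_actual() first, then release-store the map word via synchronized_set_map) is the standard release/acquire publication pattern: a concurrent marker that observes the ThinString map is then guaranteed to also observe the actual pointer. A stand-alone C++ sketch of the same ordering idea, using plain std::atomic rather than V8's APIs:

    #include <atomic>

    struct Payload { int value = 0; };
    std::atomic<Payload*> g_slot{nullptr};

    void Publish(Payload* p) {
      p->value = 42;                               // 1. initialize the payload
      g_slot.store(p, std::memory_order_release);  // 2. publish with a release store
    }

    void Consume() {
      Payload* p = g_slot.load(std::memory_order_acquire);  // pairs with the release
      if (p != nullptr) {
        // The acquire load guarantees the write to p->value is visible here.
      }
    }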
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 0b7bd55aee..dc4381b39d 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -13,6 +13,7 @@
#include "src/objects/name.h"
#include "src/objects/smi.h"
#include "src/strings/unicode-decoder.h"
+#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -20,6 +21,8 @@
namespace v8 {
namespace internal {
+class SharedStringAccessGuardIfNeeded;
+
enum InstanceType : uint16_t;
enum AllowNullsFlag { ALLOW_NULLS, DISALLOW_NULLS };
@@ -80,6 +83,8 @@ class StringShape {
#endif
};
+#include "torque-generated/src/objects/string-tq.inc"
+
// The String abstract class captures JavaScript string values:
//
// Ecma-262:
@@ -97,6 +102,10 @@ class String : public TorqueGeneratedString<String, Name> {
// A flat string has content that's encoded as a sequence of either
// one-byte chars or two-byte UC16.
// Returned by String::GetFlatContent().
+ // Not safe to use from concurrent background threads.
+ // TODO(solanes): Move FlatContent into FlatStringReader, and make it private.
+ // This would de-duplicate code, as well as taking advantage of the fact that
+ // FlatStringReader is relocatable.
class FlatContent {
public:
// Returns true if the string is flat and this structure contains content.
@@ -134,11 +143,20 @@ class String : public TorqueGeneratedString<String, Name> {
enum State { NON_FLAT, ONE_BYTE, TWO_BYTE };
// Constructors only used by String::GetFlatContent().
- explicit FlatContent(const uint8_t* start, int length)
- : onebyte_start(start), length_(length), state_(ONE_BYTE) {}
- explicit FlatContent(const uc16* start, int length)
- : twobyte_start(start), length_(length), state_(TWO_BYTE) {}
- FlatContent() : onebyte_start(nullptr), length_(0), state_(NON_FLAT) {}
+ FlatContent(const uint8_t* start, int length,
+ const DisallowHeapAllocation& no_gc)
+ : onebyte_start(start),
+ length_(length),
+ state_(ONE_BYTE),
+ no_gc_(no_gc) {}
+ FlatContent(const uc16* start, int length,
+ const DisallowHeapAllocation& no_gc)
+ : twobyte_start(start),
+ length_(length),
+ state_(TWO_BYTE),
+ no_gc_(no_gc) {}
+ explicit FlatContent(const DisallowHeapAllocation& no_gc)
+ : onebyte_start(nullptr), length_(0), state_(NON_FLAT), no_gc_(no_gc) {}
union {
const uint8_t* onebyte_start;
@@ -146,6 +164,7 @@ class String : public TorqueGeneratedString<String, Name> {
};
int length_;
State state_;
+ const DisallowHeapAllocation& no_gc_;
friend class String;
friend class IterableSubString;
@@ -157,10 +176,18 @@ class String : public TorqueGeneratedString<String, Name> {
V8_INLINE Vector<const Char> GetCharVector(
const DisallowHeapAllocation& no_gc);
- // Get chars from sequential or external strings.
+ // Get chars from sequential or external strings. May only be called when a
+ // SharedStringAccessGuard is not needed (i.e. on the main thread or on
+ // read-only strings).
template <typename Char>
inline const Char* GetChars(const DisallowHeapAllocation& no_gc);
+ // Get chars from sequential or external strings.
+ template <typename Char>
+ inline const Char* GetChars(
+ const DisallowHeapAllocation& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard);
+
// Returns the address of the character at an offset into this string.
// Requires: this->IsFlat()
const byte* AddressOfCharacterAt(int start_index,
@@ -558,8 +585,15 @@ class SeqOneByteString
// Get the address of the characters in this string.
inline Address GetCharsAddress();
+ // Get a pointer to the characters of the string. May only be called when a
+ // SharedStringAccessGuard is not needed (i.e. on the main thread or on
+ // read-only strings).
inline uint8_t* GetChars(const DisallowHeapAllocation& no_gc);
+ // Get a pointer to the characters of the string.
+ inline uint8_t* GetChars(const DisallowHeapAllocation& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard);
+
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic.
void clear_padding();
@@ -596,8 +630,15 @@ class SeqTwoByteString
// Get the address of the characters in this string.
inline Address GetCharsAddress();
+ // Get a pointer to the characters of the string. May only be called when a
+ // SharedStringAccessGuard is not needed (i.e. on the main thread or on
+ // read-only strings).
inline uc16* GetChars(const DisallowHeapAllocation& no_gc);
+ // Get a pointer to the characters of the string.
+ inline uc16* GetChars(const DisallowHeapAllocation& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard);
+
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic.
void clear_padding();
@@ -721,6 +762,8 @@ class ExternalString : public String {
static const int kUncachedSize =
kResourceOffset + FIELD_SIZE(kResourceOffset);
+ inline void AllocateExternalPointerEntries(Isolate* isolate);
+
// Return whether the external string data pointer is not cached.
inline bool is_uncached() const;
// Size in bytes of the external payload.
@@ -729,8 +772,8 @@ class ExternalString : public String {
// Used in the serializer/deserializer.
DECL_GETTER(resource_as_address, Address)
inline void set_address_as_resource(Isolate* isolate, Address address);
- inline uint32_t resource_as_uint32();
- inline void set_uint32_as_resource(Isolate* isolate, uint32_t value);
+ inline uint32_t GetResourceRefForDeserialization();
+ inline void SetResourceRefForSerialization(uint32_t ref);
// Disposes string's resource object if it has not already been disposed.
inline void DisposeResource(Isolate* isolate);
@@ -755,6 +798,7 @@ class ExternalOneByteString : public ExternalString {
// It is assumed that the previous resource is null. If it is not null, then
   // it is the responsibility of the caller to handle the previous resource.
inline void SetResource(Isolate* isolate, const Resource* buffer);
+
// Used only during serialization.
inline void set_resource(Isolate* isolate, const Resource* buffer);
@@ -796,6 +840,7 @@ class ExternalTwoByteString : public ExternalString {
// It is assumed that the previous resource is null. If it is not null, then
   // it is the responsibility of the caller to handle the previous resource.
inline void SetResource(Isolate* isolate, const Resource* buffer);
+
// Used only during serialization.
inline void set_resource(Isolate* isolate, const Resource* buffer);
@@ -827,12 +872,12 @@ class ExternalTwoByteString : public ExternalString {
};
// A flat string reader provides random access to the contents of a
-// string independent of the character width of the string. The handle
+// string independent of the character width of the string. The handle
// must be valid as long as the reader is being used.
+// Not safe to use from concurrent background threads.
class V8_EXPORT_PRIVATE FlatStringReader : public Relocatable {
public:
FlatStringReader(Isolate* isolate, Handle<String> str);
- FlatStringReader(Isolate* isolate, Vector<const char> input);
void PostGarbageCollection() override;
inline uc32 Get(int index);
template <typename Char>
@@ -840,7 +885,7 @@ class V8_EXPORT_PRIVATE FlatStringReader : public Relocatable {
int length() { return length_; }
private:
- Address* str_;
+ Handle<String> str_;
bool is_one_byte_;
int length_;
const void* start_;
@@ -855,6 +900,8 @@ class ConsStringIterator {
inline explicit ConsStringIterator(ConsString cons_string, int offset = 0) {
Reset(cons_string, offset);
}
+ ConsStringIterator(const ConsStringIterator&) = delete;
+ ConsStringIterator& operator=(const ConsStringIterator&) = delete;
inline void Reset(ConsString cons_string, int offset = 0) {
depth_ = 0;
// Next will always return nullptr.
@@ -893,12 +940,13 @@ class ConsStringIterator {
int depth_;
int maximum_depth_;
int consumed_;
- DISALLOW_COPY_AND_ASSIGN(ConsStringIterator);
};
class StringCharacterStream {
public:
inline explicit StringCharacterStream(String string, int offset = 0);
+ StringCharacterStream(const StringCharacterStream&) = delete;
+ StringCharacterStream& operator=(const StringCharacterStream&) = delete;
inline uint16_t GetNext();
inline bool HasMore();
inline void Reset(String string, int offset = 0);
@@ -913,7 +961,6 @@ class StringCharacterStream {
const uint16_t* buffer16_;
};
const uint8_t* end_;
- DISALLOW_COPY_AND_ASSIGN(StringCharacterStream);
};
template <typename Char>
diff --git a/deps/v8/src/objects/string.tq b/deps/v8/src/objects/string.tq
index 1bc51ce5da..df9b0f4ff0 100644
--- a/deps/v8/src/objects/string.tq
+++ b/deps/v8/src/objects/string.tq
@@ -25,6 +25,11 @@ extern class ExternalString extends String {
resource_data: ExternalPointer;
}
+extern operator '.resource_ptr' macro LoadExternalStringResourcePtr(
+ ExternalString): RawPtr;
+extern operator '.resource_data_ptr' macro LoadExternalStringResourceDataPtr(
+ ExternalString): RawPtr;
+
@doNotGenerateCast
extern class ExternalOneByteString extends ExternalString {
}
diff --git a/deps/v8/src/objects/struct-inl.h b/deps/v8/src/objects/struct-inl.h
index afc4c6ce49..b313bc43f8 100644
--- a/deps/v8/src/objects/struct-inl.h
+++ b/deps/v8/src/objects/struct-inl.h
@@ -11,7 +11,6 @@
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/roots/roots-inl.h"
-#include "torque-generated/class-definitions-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -19,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/struct-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(Struct)
TQ_OBJECT_CONSTRUCTORS_IMPL(Tuple2)
TQ_OBJECT_CONSTRUCTORS_IMPL(AccessorPair)
diff --git a/deps/v8/src/objects/struct.h b/deps/v8/src/objects/struct.h
index fcae2e593d..fa4fe42b62 100644
--- a/deps/v8/src/objects/struct.h
+++ b/deps/v8/src/objects/struct.h
@@ -7,7 +7,6 @@
#include "src/objects/heap-object.h"
#include "src/objects/objects.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/struct-tq.inc"
+
// An abstract superclass, a marker class really, for simple structure classes.
// It doesn't carry any functionality but allows struct classes to be
// identified in the type system.
diff --git a/deps/v8/src/objects/synthetic-module-inl.h b/deps/v8/src/objects/synthetic-module-inl.h
new file mode 100644
index 0000000000..a958e50373
--- /dev/null
+++ b/deps/v8/src/objects/synthetic-module-inl.h
@@ -0,0 +1,27 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SYNTHETIC_MODULE_INL_H_
+#define V8_OBJECTS_SYNTHETIC_MODULE_INL_H_
+
+#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/synthetic-module.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/synthetic-module-tq-inl.inc"
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(SyntheticModule)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SYNTHETIC_MODULE_INL_H_
diff --git a/deps/v8/src/objects/synthetic-module.cc b/deps/v8/src/objects/synthetic-module.cc
index abe9ad2ed2..6c288f97cf 100644
--- a/deps/v8/src/objects/synthetic-module.cc
+++ b/deps/v8/src/objects/synthetic-module.cc
@@ -10,6 +10,7 @@
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/synthetic-module-inl.h"
#include "src/utils/ostreams.h"
namespace v8 {
@@ -59,7 +60,7 @@ MaybeHandle<Cell> SyntheticModule::ResolveExport(
if (!must_resolve) return MaybeHandle<Cell>();
- return isolate->Throw<Cell>(
+ return isolate->ThrowAt<Cell>(
isolate->factory()->NewSyntaxError(MessageTemplate::kUnresolvableExport,
module_specifier, export_name),
&loc);
diff --git a/deps/v8/src/objects/synthetic-module.h b/deps/v8/src/objects/synthetic-module.h
index 8ac6668170..7c0060e6f0 100644
--- a/deps/v8/src/objects/synthetic-module.h
+++ b/deps/v8/src/objects/synthetic-module.h
@@ -13,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/synthetic-module-tq.inc"
+
// The runtime representation of a Synthetic Module Record, a module that can be
// instantiated by an embedder with embedder-defined exports and evaluation
// steps.
diff --git a/deps/v8/src/objects/tagged-field-inl.h b/deps/v8/src/objects/tagged-field-inl.h
index fed3192dd9..eaaa557431 100644
--- a/deps/v8/src/objects/tagged-field-inl.h
+++ b/deps/v8/src/objects/tagged-field-inl.h
@@ -61,7 +61,7 @@ T TaggedField<T, kFieldOffset>::load(HeapObject host, int offset) {
// static
template <typename T, int kFieldOffset>
-T TaggedField<T, kFieldOffset>::load(const Isolate* isolate, HeapObject host,
+T TaggedField<T, kFieldOffset>::load(IsolateRoot isolate, HeapObject host,
int offset) {
Tagged_t value = *location(host, offset);
return T(tagged_to_full(isolate, value));
@@ -70,7 +70,7 @@ T TaggedField<T, kFieldOffset>::load(const Isolate* isolate, HeapObject host,
// static
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::store(HeapObject host, T value) {
-#ifdef V8_CONCURRENT_MARKING
+#ifdef V8_ATOMIC_OBJECT_FIELD_WRITES
Relaxed_Store(host, value);
#else
*location(host) = full_to_tagged(value.ptr());
@@ -80,7 +80,7 @@ void TaggedField<T, kFieldOffset>::store(HeapObject host, T value) {
// static
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::store(HeapObject host, int offset, T value) {
-#ifdef V8_CONCURRENT_MARKING
+#ifdef V8_ATOMIC_OBJECT_FIELD_WRITES
Relaxed_Store(host, offset, value);
#else
*location(host, offset) = full_to_tagged(value.ptr());
@@ -96,8 +96,7 @@ T TaggedField<T, kFieldOffset>::Relaxed_Load(HeapObject host, int offset) {
// static
template <typename T, int kFieldOffset>
-template <typename LocalIsolate>
-T TaggedField<T, kFieldOffset>::Relaxed_Load(const LocalIsolate* isolate,
+T TaggedField<T, kFieldOffset>::Relaxed_Load(IsolateRoot isolate,
HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location(host, offset));
return T(tagged_to_full(isolate, value));
@@ -126,8 +125,7 @@ T TaggedField<T, kFieldOffset>::Acquire_Load(HeapObject host, int offset) {
// static
template <typename T, int kFieldOffset>
-template <typename LocalIsolate>
-T TaggedField<T, kFieldOffset>::Acquire_Load(const LocalIsolate* isolate,
+T TaggedField<T, kFieldOffset>::Acquire_Load(IsolateRoot isolate,
HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location(host, offset));
return T(tagged_to_full(isolate, value));
diff --git a/deps/v8/src/objects/tagged-field.h b/deps/v8/src/objects/tagged-field.h
index 82b6268ecd..8560c54cc4 100644
--- a/deps/v8/src/objects/tagged-field.h
+++ b/deps/v8/src/objects/tagged-field.h
@@ -38,22 +38,20 @@ class TaggedField : public AllStatic {
static inline Address address(HeapObject host, int offset = 0);
static inline T load(HeapObject host, int offset = 0);
- static inline T load(const Isolate* isolate, HeapObject host, int offset = 0);
+ static inline T load(IsolateRoot isolate, HeapObject host, int offset = 0);
static inline void store(HeapObject host, T value);
static inline void store(HeapObject host, int offset, T value);
static inline T Relaxed_Load(HeapObject host, int offset = 0);
- template <typename LocalIsolate>
- static T Relaxed_Load(const LocalIsolate* isolate, HeapObject host,
- int offset = 0);
+ static inline T Relaxed_Load(IsolateRoot isolate, HeapObject host,
+ int offset = 0);
static inline void Relaxed_Store(HeapObject host, T value);
- static void Relaxed_Store(HeapObject host, int offset, T value);
+ static inline void Relaxed_Store(HeapObject host, int offset, T value);
static inline T Acquire_Load(HeapObject host, int offset = 0);
- template <typename LocalIsolate>
- static inline T Acquire_Load(const LocalIsolate* isolate, HeapObject host,
+ static inline T Acquire_Load(IsolateRoot isolate, HeapObject host,
int offset = 0);
static inline void Release_Store(HeapObject host, T value);
diff --git a/deps/v8/src/objects/template-objects-inl.h b/deps/v8/src/objects/template-objects-inl.h
index 3718955fb7..caae8ed8ed 100644
--- a/deps/v8/src/objects/template-objects-inl.h
+++ b/deps/v8/src/objects/template-objects-inl.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/template-objects-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(TemplateObjectDescription)
TQ_OBJECT_CONSTRUCTORS_IMPL(CachedTemplateObject)
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
index 094485de50..8e888f6ca0 100644
--- a/deps/v8/src/objects/template-objects.h
+++ b/deps/v8/src/objects/template-objects.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/template-objects-tq.inc"
+
// CachedTemplateObject is a tuple used to cache a TemplateObject that has been
// created. All the CachedTemplateObject's for a given SharedFunctionInfo form a
// linked list via the next fields.
diff --git a/deps/v8/src/objects/templates-inl.h b/deps/v8/src/objects/templates-inl.h
index 8dd5aa6e2d..613a4279a4 100644
--- a/deps/v8/src/objects/templates-inl.h
+++ b/deps/v8/src/objects/templates-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/templates-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(TemplateInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(FunctionTemplateInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(ObjectTemplateInfo)
@@ -36,6 +38,9 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache, DoNotCacheBit::kShift)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
AcceptAnyReceiverBit::kShift)
+RELEASE_ACQUIRE_ACCESSORS(FunctionTemplateInfo, call_code, HeapObject,
+ kCallCodeOffset)
+
// static
FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
@@ -129,6 +134,14 @@ void ObjectTemplateInfo::set_immutable_proto(bool immutable) {
return set_data(IsImmutablePrototypeBit::update(data(), immutable));
}
+bool ObjectTemplateInfo::code_like() const {
+ return IsCodeKindBit::decode(data());
+}
+
+void ObjectTemplateInfo::set_code_like(bool is_code_like) {
+ return set_data(IsCodeKindBit::update(data(), is_code_like));
+}
+
bool FunctionTemplateInfo::IsTemplateFor(JSObject object) {
return IsTemplateFor(object.map());
}
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index 5aa0dc16a3..13d68ef391 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/templates-tq.inc"
+
class TemplateInfo : public TorqueGeneratedTemplateInfo<TemplateInfo, Struct> {
public:
NEVER_READ_ONLY_SPACE
@@ -85,6 +87,8 @@ class FunctionTemplateInfo
DECL_RARE_ACCESSORS(c_signature, CSignature, Object)
#undef DECL_RARE_ACCESSORS
+ DECL_RELEASE_ACQUIRE_ACCESSORS(call_code, HeapObject)
+
// Begin flag bits ---------------------
DECL_BOOLEAN_ACCESSORS(undetectable)
@@ -156,6 +160,7 @@ class ObjectTemplateInfo
public:
DECL_INT_ACCESSORS(embedder_field_count)
DECL_BOOLEAN_ACCESSORS(immutable_proto)
+ DECL_BOOLEAN_ACCESSORS(code_like)
// Dispatched behavior.
DECL_PRINTER(ObjectTemplateInfo)
diff --git a/deps/v8/src/objects/template.tq b/deps/v8/src/objects/templates.tq
index 1336fb19ba..564d3569dc 100644
--- a/deps/v8/src/objects/template.tq
+++ b/deps/v8/src/objects/templates.tq
@@ -65,7 +65,8 @@ extern class FunctionTemplateInfo extends TemplateInfo {
bitfield struct ObjectTemplateInfoFlags extends uint31 {
is_immutable_prototype: bool: 1 bit;
- embedder_field_count: int32: 29 bit;
+ is_code_kind: bool: 1 bit;
+ embedder_field_count: int32: 28 bit;
}
@generateCppClass
diff --git a/deps/v8/src/objects/torque-defined-classes-inl.h b/deps/v8/src/objects/torque-defined-classes-inl.h
new file mode 100644
index 0000000000..2579e9f430
--- /dev/null
+++ b/deps/v8/src/objects/torque-defined-classes-inl.h
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef V8_OBJECTS_TORQUE_DEFINED_CLASSES_INL_H_
+#define V8_OBJECTS_TORQUE_DEFINED_CLASSES_INL_H_
+
+#include "src/objects/objects-inl.h"
+#include "src/objects/torque-defined-classes.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/torque-defined-classes-tq-inl.inc"
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_TORQUE_DEFINED_CLASSES_INL_H_
diff --git a/deps/v8/src/objects/torque-defined-classes.h b/deps/v8/src/objects/torque-defined-classes.h
new file mode 100644
index 0000000000..aeea4e1c53
--- /dev/null
+++ b/deps/v8/src/objects/torque-defined-classes.h
@@ -0,0 +1,25 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef V8_OBJECTS_TORQUE_DEFINED_CLASSES_H_
+#define V8_OBJECTS_TORQUE_DEFINED_CLASSES_H_
+
+#include "src/objects/descriptor-array.h"
+#include "src/objects/fixed-array.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/torque-defined-classes-tq.inc"
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_TORQUE_DEFINED_CLASSES_H_
diff --git a/deps/v8/src/objects/torque-defined-classes.tq b/deps/v8/src/objects/torque-defined-classes.tq
new file mode 100644
index 0000000000..883576777b
--- /dev/null
+++ b/deps/v8/src/objects/torque-defined-classes.tq
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/torque-defined-classes.h"
+
+// Classes defined in Torque that are not exported are attributed to this file,
+// independently of where they are actually defined. This gives them
+// corresponding C++ headers and removes the need to add another C++ header for
+// each file defining such a class.
+// In addition, classes defined in the test directory are also attributed to
+// here, because there is no directory corresponding to src/objects in test/ and
+// it would be confusing to add one there.
+
+// The corresponding C++ headers are:
+// - src/objects/torque-defined-classes.h
+// - src/objects/torque-defined-classes-inl.h
diff --git a/deps/v8/src/objects/transitions-inl.h b/deps/v8/src/objects/transitions-inl.h
index b2ee5366b2..d9d2f83a7f 100644
--- a/deps/v8/src/objects/transitions-inl.h
+++ b/deps/v8/src/objects/transitions-inl.h
@@ -5,13 +5,13 @@
#ifndef V8_OBJECTS_TRANSITIONS_INL_H_
#define V8_OBJECTS_TRANSITIONS_INL_H_
-#include "src/objects/transitions.h"
-
#include "src/ic/handler-configuration-inl.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/maybe-object-inl.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
+#include "src/objects/transitions.h"
+#include "src/snapshot/deserializer.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -100,7 +100,7 @@ HeapObjectSlot TransitionArray::GetTargetSlot(int transition_number) {
PropertyDetails TransitionsAccessor::GetTargetDetails(Name name, Map target) {
DCHECK(!IsSpecialTransition(name.GetReadOnlyRoots(), name));
InternalIndex descriptor = target.LastAdded();
- DescriptorArray descriptors = target.instance_descriptors();
+ DescriptorArray descriptors = target.instance_descriptors(kRelaxedLoad);
// Transitions are allowed only for the last added property.
DCHECK(descriptors.GetKey(descriptor).Equals(name));
return descriptors.GetDetails(descriptor);
@@ -113,7 +113,7 @@ PropertyDetails TransitionsAccessor::GetSimpleTargetDetails(Map transition) {
// static
Name TransitionsAccessor::GetSimpleTransitionKey(Map transition) {
InternalIndex descriptor = transition.LastAdded();
- return transition.instance_descriptors().GetKey(descriptor);
+ return transition.instance_descriptors(kRelaxedLoad).GetKey(descriptor);
}
// static
@@ -157,6 +157,14 @@ bool TransitionArray::GetTargetIfExists(int transition_number, Isolate* isolate,
Map* target) {
MaybeObject raw = GetRawTarget(transition_number);
HeapObject heap_object;
+ // If the raw target is a Smi, then this TransitionArray is in the process of
+ // being deserialized, and doesn't yet have an initialized entry for this
+ // transition.
+ if (raw.IsSmi()) {
+ DCHECK(isolate->has_active_deserializer());
+ DCHECK_EQ(raw.ToSmi(), Deserializer::uninitialized_field_value());
+ return false;
+ }
if (raw->GetHeapObjectIfStrong(&heap_object) &&
heap_object.IsUndefined(isolate)) {
return false;
diff --git a/deps/v8/src/objects/transitions.cc b/deps/v8/src/objects/transitions.cc
index d2c5f56fd5..f6623258d6 100644
--- a/deps/v8/src/objects/transitions.cc
+++ b/deps/v8/src/objects/transitions.cc
@@ -285,7 +285,7 @@ bool TransitionsAccessor::IsMatchingMap(Map target, Name name,
PropertyKind kind,
PropertyAttributes attributes) {
InternalIndex descriptor = target.LastAdded();
- DescriptorArray descriptors = target.instance_descriptors();
+ DescriptorArray descriptors = target.instance_descriptors(kRelaxedLoad);
Name key = descriptors.GetKey(descriptor);
if (key != name) return false;
return descriptors.GetDetails(descriptor)
@@ -330,7 +330,7 @@ Handle<WeakFixedArray> TransitionArray::GrowPrototypeTransitionArray(
Handle<WeakFixedArray> array, int new_capacity, Isolate* isolate) {
// Grow array by factor 2 up to MaxCachedPrototypeTransitions.
int capacity = array->length() - kProtoTransitionHeaderSize;
- new_capacity = Min(kMaxCachedPrototypeTransitions, new_capacity);
+ new_capacity = std::min({kMaxCachedPrototypeTransitions, new_capacity});
DCHECK_GT(new_capacity, capacity);
int grow_by = new_capacity - capacity;
array = isolate->factory()->CopyWeakFixedArrayAndGrow(array, grow_by);
@@ -530,7 +530,8 @@ void TransitionsAccessor::CheckNewTransitionsAreConsistent(
TransitionArray new_transitions = TransitionArray::cast(transitions);
for (int i = 0; i < old_transitions.number_of_transitions(); i++) {
Map target = old_transitions.GetTarget(i);
- if (target.instance_descriptors() == map_.instance_descriptors()) {
+ if (target.instance_descriptors(kRelaxedLoad) ==
+ map_.instance_descriptors(kRelaxedLoad)) {
Name key = old_transitions.GetKey(i);
int new_target_index;
if (IsSpecialTransition(ReadOnlyRoots(isolate_), key)) {
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index d9abe45124..d5f5f05c29 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -7,6 +7,7 @@
#include <type_traits>
#include "include/v8-value-serializer-version.h"
+#include "include/v8.h"
#include "src/api/api-inl.h"
#include "src/base/logging.h"
#include "src/execution/isolate.h"
@@ -20,9 +21,11 @@
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/objects/oddball-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/property-descriptor.h"
+#include "src/objects/property-details.h"
#include "src/objects/smi.h"
#include "src/objects/transitions-inl.h"
#include "src/snapshot/code-serializer.h"
@@ -384,7 +387,7 @@ void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
Handle<JSArrayBuffer> array_buffer) {
DCHECK(!array_buffer_transfer_map_.Find(array_buffer));
DCHECK(!array_buffer->is_shared());
- array_buffer_transfer_map_.Set(array_buffer, transfer_id);
+ array_buffer_transfer_map_.Insert(array_buffer, transfer_id);
}
Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
@@ -500,16 +503,16 @@ void ValueSerializer::WriteString(Handle<String> string) {
Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
// If the object has already been serialized, just write its ID.
- uint32_t* id_map_entry = id_map_.Get(receiver);
- if (uint32_t id = *id_map_entry) {
+ auto find_result = id_map_.FindOrInsert(receiver);
+ if (find_result.already_exists) {
WriteTag(SerializationTag::kObjectReference);
- WriteVarint(id - 1);
+ WriteVarint(*find_result.entry - 1);
return ThrowIfOutOfMemory();
}
// Otherwise, allocate an ID for it.
uint32_t id = next_id_++;
- *id_map_entry = id + 1;
+ *find_result.entry = id + 1;
// Eliminate callable and exotic objects, which should not be serialized.
InstanceType instance_type = receiver->map().instance_type();
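
The rewritten WriteJSReceiver above keeps the existing back-reference scheme: the map stores id + 1 so that a zero entry means "not yet serialized". A self-contained sketch of that scheme, with V8's IdentityMap replaced by std::unordered_map purely for illustration:

    #include <cstdint>
    #include <unordered_map>

    std::unordered_map<const void*, uint32_t> id_map;  // stores id + 1; 0 means absent
    uint32_t next_id = 0;

    // Returns true (and the existing id) if |obj| was seen before,
    // otherwise assigns a fresh id and returns false.
    bool FindOrInsert(const void* obj, uint32_t* id) {
      uint32_t& slot = id_map[obj];   // value-initialized to 0 on first insertion
      if (slot != 0) {
        *id = slot - 1;               // back-reference to an earlier object
        return true;
      }
      *id = next_id++;
      slot = *id + 1;                 // remember as id + 1
      return false;
    }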
@@ -588,13 +591,15 @@ Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
uint32_t properties_written = 0;
bool map_changed = false;
for (InternalIndex i : map->IterateOwnDescriptors()) {
- Handle<Name> key(map->instance_descriptors().GetKey(i), isolate_);
+ Handle<Name> key(map->instance_descriptors(kRelaxedLoad).GetKey(i),
+ isolate_);
if (!key->IsString()) continue;
- PropertyDetails details = map->instance_descriptors().GetDetails(i);
+ PropertyDetails details =
+ map->instance_descriptors(kRelaxedLoad).GetDetails(i);
if (details.IsDontEnum()) continue;
Handle<Object> value;
- if (V8_LIKELY(!map_changed)) map_changed = *map == object->map();
+ if (V8_LIKELY(!map_changed)) map_changed = *map != object->map();
if (V8_LIKELY(!map_changed && details.location() == kField)) {
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
@@ -715,13 +720,14 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
}
}
- KeyAccumulator accumulator(isolate_, KeyCollectionMode::kOwnOnly,
- ENUMERABLE_STRINGS);
- if (!accumulator.CollectOwnPropertyNames(array, array).FromMaybe(false)) {
+ Handle<FixedArray> keys;
+ if (!KeyAccumulator::GetKeys(array, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS,
+ GetKeysConversion::kKeepNumbers, false, true)
+ .ToHandle(&keys)) {
return Nothing<bool>();
}
- Handle<FixedArray> keys =
- accumulator.GetKeys(GetKeysConversion::kConvertToString);
+
uint32_t properties_written;
if (!WriteJSObjectPropertiesSlow(array, keys).To(&properties_written)) {
return Nothing<bool>();
@@ -790,13 +796,12 @@ Maybe<bool> ValueSerializer::WriteJSMap(Handle<JSMap> map) {
{
DisallowHeapAllocation no_gc;
Oddball the_hole = ReadOnlyRoots(isolate_).the_hole_value();
- int capacity = table->UsedCapacity();
int result_index = 0;
- for (int i = 0; i < capacity; i++) {
- Object key = table->KeyAt(i);
+ for (InternalIndex entry : table->IterateEntries()) {
+ Object key = table->KeyAt(entry);
if (key == the_hole) continue;
entries->set(result_index++, key);
- entries->set(result_index++, table->ValueAt(i));
+ entries->set(result_index++, table->ValueAt(entry));
}
DCHECK_EQ(result_index, length);
}
@@ -821,10 +826,9 @@ Maybe<bool> ValueSerializer::WriteJSSet(Handle<JSSet> set) {
{
DisallowHeapAllocation no_gc;
Oddball the_hole = ReadOnlyRoots(isolate_).the_hole_value();
- int capacity = table->UsedCapacity();
int result_index = 0;
- for (int i = 0; i < capacity; i++) {
- Object key = table->KeyAt(i);
+ for (InternalIndex entry : table->IterateEntries()) {
+ Object key = table->KeyAt(entry);
if (key == the_hole) continue;
entries->set(result_index++, key);
}
@@ -1647,8 +1651,12 @@ MaybeHandle<JSRegExp> ValueDeserializer::ReadJSRegExp() {
}
// Ensure the deserialized flags are valid.
- uint32_t flags_mask = static_cast<uint32_t>(-1) << JSRegExp::kFlagCount;
- if ((raw_flags & flags_mask) ||
+ uint32_t bad_flags_mask = static_cast<uint32_t>(-1) << JSRegExp::kFlagCount;
+ // kLinear is accepted only with the appropriate flag.
+ if (!FLAG_enable_experimental_regexp_engine) {
+ bad_flags_mask |= JSRegExp::kLinear;
+ }
+ if ((raw_flags & bad_flags_mask) ||
!JSRegExp::New(isolate_, pattern, static_cast<JSRegExp::Flags>(raw_flags))
.ToHandle(&regexp)) {
return MaybeHandle<JSRegExp>();
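The widened bad_flags_mask means a serialized regexp carrying the kLinear bit is now rejected unless the experimental engine is enabled. A hedged sketch of the corresponding source-level behaviour, assuming the flag letter is 'l':

    // Accepted only when V8 runs with --enable-experimental-regexp-engine;
    // otherwise 'l' is an invalid flag and construction throws a SyntaxError,
    // mirroring how the deserializer above treats a stray kLinear bit.
    const re = new RegExp('a+b*c', 'l');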
@@ -1980,7 +1988,8 @@ static void CommitProperties(Handle<JSObject> object, Handle<Map> map,
DCHECK(!object->map().is_dictionary_map());
DisallowHeapAllocation no_gc;
- DescriptorArray descriptors = object->map().instance_descriptors();
+ DescriptorArray descriptors =
+ object->map().instance_descriptors(kRelaxedLoad);
for (InternalIndex i : InternalIndex::Range(properties.size())) {
// Initializing store.
object->WriteToField(i, descriptors.GetDetails(i),
@@ -2002,7 +2011,8 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
bool transitioning = true;
Handle<Map> map(object->map(), isolate_);
DCHECK(!map->is_dictionary_map());
- DCHECK_EQ(0, map->instance_descriptors().number_of_descriptors());
+ DCHECK_EQ(0,
+ map->instance_descriptors(kRelaxedLoad).number_of_descriptors());
std::vector<Handle<Object>> properties;
properties.reserve(8);
@@ -2053,11 +2063,11 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
if (transitioning) {
InternalIndex descriptor(properties.size());
PropertyDetails details =
- target->instance_descriptors().GetDetails(descriptor);
+ target->instance_descriptors(kRelaxedLoad).GetDetails(descriptor);
Representation expected_representation = details.representation();
if (value->FitsRepresentation(expected_representation)) {
if (expected_representation.IsHeapObject() &&
- !target->instance_descriptors()
+ !target->instance_descriptors(kRelaxedLoad)
.GetFieldType(descriptor)
.NowContains(value)) {
Handle<FieldType> value_type =
@@ -2066,7 +2076,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
details.constness(), expected_representation,
value_type);
}
- DCHECK(target->instance_descriptors()
+ DCHECK(target->instance_descriptors(kRelaxedLoad)
.GetFieldType(descriptor)
.NowContains(value));
properties.push_back(value);
diff --git a/deps/v8/src/objects/value-serializer.h b/deps/v8/src/objects/value-serializer.h
index acb3f3d25e..e06badece3 100644
--- a/deps/v8/src/objects/value-serializer.h
+++ b/deps/v8/src/objects/value-serializer.h
@@ -48,6 +48,8 @@ class ValueSerializer {
public:
ValueSerializer(Isolate* isolate, v8::ValueSerializer::Delegate* delegate);
~ValueSerializer();
+ ValueSerializer(const ValueSerializer&) = delete;
+ ValueSerializer& operator=(const ValueSerializer&) = delete;
/*
* Writes out a header, which includes the format version.
@@ -168,8 +170,6 @@ class ValueSerializer {
// A similar map, for transferred array buffers.
IdentityMap<uint32_t, ZoneAllocationPolicy> array_buffer_transfer_map_;
-
- DISALLOW_COPY_AND_ASSIGN(ValueSerializer);
};
/*
@@ -181,6 +181,8 @@ class ValueDeserializer {
ValueDeserializer(Isolate* isolate, Vector<const uint8_t> data,
v8::ValueDeserializer::Delegate* delegate);
~ValueDeserializer();
+ ValueDeserializer(const ValueDeserializer&) = delete;
+ ValueDeserializer& operator=(const ValueDeserializer&) = delete;
/*
* Runs version detection logic, which may fail if the format is invalid.
@@ -299,8 +301,6 @@ class ValueDeserializer {
// Always global handles.
Handle<FixedArray> id_map_;
MaybeHandle<SimpleNumberDictionary> array_buffer_transfer_map_;
-
- DISALLOW_COPY_AND_ASSIGN(ValueDeserializer);
};
} // namespace internal
diff --git a/deps/v8/src/parsing/DIR_METADATA b/deps/v8/src/parsing/DIR_METADATA
new file mode 100644
index 0000000000..165380ae4f
--- /dev/null
+++ b/deps/v8/src/parsing/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Parser"
+}
\ No newline at end of file
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index 40e6e8b427..9d54af5f2d 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -5,5 +5,3 @@ littledan@chromium.org
marja@chromium.org
neis@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Parser
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 5fd685505c..1c2b1b91ed 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -32,8 +32,6 @@ UnoptimizedCompileFlags::UnoptimizedCompileFlags(Isolate* isolate,
set_might_always_opt(FLAG_always_opt || FLAG_prepare_always_opt);
set_allow_natives_syntax(FLAG_allow_natives_syntax);
set_allow_lazy_compile(FLAG_lazy);
- set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
- set_allow_harmony_import_meta(FLAG_harmony_import_meta);
set_allow_harmony_private_methods(FLAG_harmony_private_methods);
set_collect_source_positions(!FLAG_enable_lazy_source_positions ||
isolate->NeedsDetailedOptimizedCodeLineInfo());
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index c774f0ae94..d99ddcda89 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -60,8 +60,6 @@ class Zone;
V(might_always_opt, bool, 1, _) \
V(allow_natives_syntax, bool, 1, _) \
V(allow_lazy_compile, bool, 1, _) \
- V(allow_harmony_dynamic_import, bool, 1, _) \
- V(allow_harmony_import_meta, bool, 1, _) \
V(allow_harmony_private_methods, bool, 1, _) \
V(is_oneshot_iife, bool, 1, _) \
V(collect_source_positions, bool, 1, _) \
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 47981c768b..ee54709345 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -1853,7 +1853,6 @@ ParserBase<Impl>::ParsePrimaryExpression() {
return ParseSuperExpression(is_new);
}
case Token::IMPORT:
- if (!flags().allow_harmony_dynamic_import()) break;
return ParseImportExpressions();
case Token::LBRACK:
@@ -2989,15 +2988,12 @@ ParserBase<Impl>::ParseCoalesceExpression(ExpressionT expression) {
bool first_nullish = true;
while (peek() == Token::NULLISH) {
SourceRange right_range;
- int pos;
- ExpressionT y;
- {
- SourceRangeScope right_range_scope(scanner(), &right_range);
- Consume(Token::NULLISH);
- pos = peek_position();
- // Parse BitwiseOR or higher.
- y = ParseBinaryExpression(6);
- }
+ SourceRangeScope right_range_scope(scanner(), &right_range);
+ Consume(Token::NULLISH);
+ int pos = peek_position();
+
+ // Parse BitwiseOR or higher.
+ ExpressionT y = ParseBinaryExpression(6);
if (first_nullish) {
expression =
factory()->NewBinaryOperation(Token::NULLISH, expression, y, pos);
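Only the source-range bookkeeping changes here; chained '??' still parses as a single nullish chain. For reference, a small sketch of the grammar being parsed (not part of the patch):

    let a, b = 0, c = 1;
    a ?? b ?? c;        // 0 -- one nullish chain; 0 is not nullish, so it wins
    // a ?? b || c;     // SyntaxError: '??' may not mix with '||' or '&&' unparenthesized
    (a ?? b) || c;      // 1 -- with parentheses, 0 is falsy for '||'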
@@ -3297,7 +3293,6 @@ ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
bool optional_chaining = false;
bool is_optional = false;
- int optional_link_begin;
do {
switch (peek()) {
case Token::QUESTION_PERIOD: {
@@ -3305,16 +3300,10 @@ ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
ReportUnexpectedToken(peek());
return impl()->FailureExpression();
}
- // Include the ?. in the source range position.
- optional_link_begin = scanner()->peek_location().beg_pos;
Consume(Token::QUESTION_PERIOD);
is_optional = true;
optional_chaining = true;
- if (Token::IsPropertyOrCall(peek())) continue;
- int pos = position();
- ExpressionT key = ParsePropertyOrPrivatePropertyName();
- result = factory()->NewProperty(result, key, pos, is_optional);
- break;
+ continue;
}
/* Property */
@@ -3394,7 +3383,14 @@ ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
}
default:
- // Template literals in/after an Optional Chain not supported:
+ /* Optional Property */
+ if (is_optional) {
+ DCHECK_EQ(scanner()->current_token(), Token::QUESTION_PERIOD);
+ int pos = position();
+ ExpressionT key = ParsePropertyOrPrivatePropertyName();
+ result = factory()->NewProperty(result, key, pos, is_optional);
+ break;
+ }
if (optional_chaining) {
impl()->ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kOptionalChainingNoTemplate);
@@ -3405,12 +3401,8 @@ ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
result = ParseTemplateLiteral(result, position(), true);
break;
}
- if (is_optional) {
- SourceRange chain_link_range(optional_link_begin, end_position());
- impl()->RecordExpressionSourceRange(result, chain_link_range);
- is_optional = false;
- }
- } while (Token::IsPropertyOrCall(peek()));
+ is_optional = false;
+ } while (is_optional || Token::IsPropertyOrCall(peek()));
if (optional_chaining) return factory()->NewOptionalChain(result);
return result;
}
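The restructured loop now handles '?.name' through the default case (re-entered via the added 'continue'), but the set of accepted optional-chain forms is unchanged. A sketch of what still parses, and what still does not (not part of the patch):

    const o = { p: { q: 1 }, f: () => 2 };
    o?.p.q;         // 1 -- '?.' starts the chain; later links may use plain '.'
    o?.['p'];       // { q: 1 }
    o?.f();         // 2
    o.missing?.p;   // undefined, no throw
    // o?.`tag`;    // SyntaxError: template literals are not allowed in an optional chain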
@@ -3446,10 +3438,7 @@ ParserBase<Impl>::ParseMemberWithPresentNewPrefixesExpression() {
if (peek() == Token::SUPER) {
const bool is_new = true;
result = ParseSuperExpression(is_new);
- } else if (flags().allow_harmony_dynamic_import() &&
- peek() == Token::IMPORT &&
- (!flags().allow_harmony_import_meta() ||
- PeekAhead() == Token::LPAREN)) {
+ } else if (peek() == Token::IMPORT && PeekAhead() == Token::LPAREN) {
impl()->ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kImportCallNotNewExpression);
return impl()->FailureExpression();
@@ -3547,11 +3536,9 @@ ParserBase<Impl>::ParseMemberExpression() {
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseImportExpressions() {
- DCHECK(flags().allow_harmony_dynamic_import());
-
Consume(Token::IMPORT);
int pos = position();
- if (flags().allow_harmony_import_meta() && Check(Token::PERIOD)) {
+ if (Check(Token::PERIOD)) {
ExpectContextualKeyword(ast_value_factory()->meta_string(), "import.meta",
pos);
if (!flags().is_module()) {
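With the two harmony flags retired, import() expressions and import.meta are parsed unconditionally (import.meta remains restricted to modules by the is_module() check above). An illustrative sketch; the specifier is hypothetical:

    import('./feature.mjs').then((mod) => mod.run?.());  // dynamic import expression
    console.log(import.meta.url);                        // modules only
    // new import('./feature.mjs');                      // still a SyntaxError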
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index b05ae32bba..c65c1dc6b6 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -27,6 +27,7 @@
#include "src/runtime/runtime.h"
#include "src/strings/char-predicates-inl.h"
#include "src/strings/string-stream.h"
+#include "src/strings/unicode-inl.h"
#include "src/tracing/trace-event.h"
#include "src/zone/zone-list-inl.h"
@@ -1033,9 +1034,7 @@ Statement* Parser::ParseModuleItem() {
// We must be careful not to parse a dynamic import expression as an import
// declaration. Same for import.meta expressions.
Token::Value peek_ahead = PeekAhead();
- if ((!flags().allow_harmony_dynamic_import() ||
- peek_ahead != Token::LPAREN) &&
- (!flags().allow_harmony_import_meta() || peek_ahead != Token::PERIOD)) {
+ if (peek_ahead != Token::LPAREN && peek_ahead != Token::PERIOD) {
ParseImportDeclaration();
return factory()->EmptyStatement();
}
@@ -1071,7 +1070,8 @@ const AstRawString* Parser::ParseModuleSpecifier() {
}
ZoneChunkList<Parser::ExportClauseData>* Parser::ParseExportClause(
- Scanner::Location* reserved_loc) {
+ Scanner::Location* reserved_loc,
+ Scanner::Location* string_literal_local_name_loc) {
// ExportClause :
// '{' '}'
// '{' ExportsList '}'
@@ -1084,6 +1084,12 @@ ZoneChunkList<Parser::ExportClauseData>* Parser::ParseExportClause(
// ExportSpecifier :
// IdentifierName
// IdentifierName 'as' IdentifierName
+ // IdentifierName 'as' ModuleExportName
+ // ModuleExportName
+ // ModuleExportName 'as' ModuleExportName
+ //
+ // ModuleExportName :
+ // StringLiteral
ZoneChunkList<ExportClauseData>* export_data =
zone()->New<ZoneChunkList<ExportClauseData>>(zone());
@@ -1091,23 +1097,27 @@ ZoneChunkList<Parser::ExportClauseData>* Parser::ParseExportClause(
Token::Value name_tok;
while ((name_tok = peek()) != Token::RBRACE) {
- // Keep track of the first reserved word encountered in case our
- // caller needs to report an error.
- if (!reserved_loc->IsValid() &&
- !Token::IsValidIdentifier(name_tok, LanguageMode::kStrict, false,
- flags().is_module())) {
+ const AstRawString* local_name = ParseExportSpecifierName();
+ if (!string_literal_local_name_loc->IsValid() &&
+ name_tok == Token::STRING) {
+ // Keep track of the first string literal local name exported for error
+ // reporting. These must be followed by a 'from' clause.
+ *string_literal_local_name_loc = scanner()->location();
+ } else if (!reserved_loc->IsValid() &&
+ !Token::IsValidIdentifier(name_tok, LanguageMode::kStrict, false,
+ flags().is_module())) {
+ // Keep track of the first reserved word encountered in case our
+ // caller needs to report an error.
*reserved_loc = scanner()->location();
}
- const AstRawString* local_name = ParsePropertyName();
- const AstRawString* export_name = nullptr;
+ const AstRawString* export_name;
Scanner::Location location = scanner()->location();
if (CheckContextualKeyword(ast_value_factory()->as_string())) {
- export_name = ParsePropertyName();
+ export_name = ParseExportSpecifierName();
// Set the location to the whole "a as b" string, so that it makes sense
// both for errors due to "a" and for errors due to "b".
location.end_pos = scanner()->location().end_pos;
- }
- if (export_name == nullptr) {
+ } else {
export_name = local_name;
}
export_data->push_back({export_name, local_name, location});
@@ -1122,6 +1132,31 @@ ZoneChunkList<Parser::ExportClauseData>* Parser::ParseExportClause(
return export_data;
}
+const AstRawString* Parser::ParseExportSpecifierName() {
+ Token::Value next = Next();
+
+ // IdentifierName
+ if (V8_LIKELY(Token::IsPropertyName(next))) {
+ return GetSymbol();
+ }
+
+ // ModuleExportName
+ if (next == Token::STRING) {
+ const AstRawString* export_name = GetSymbol();
+ if (V8_LIKELY(export_name->is_one_byte())) return export_name;
+ if (!unibrow::Utf16::HasUnpairedSurrogate(
+ reinterpret_cast<const uint16_t*>(export_name->raw_data()),
+ export_name->length())) {
+ return export_name;
+ }
+ ReportMessage(MessageTemplate::kInvalidModuleExportName);
+ return EmptyIdentifierString();
+ }
+
+ ReportUnexpectedToken(next);
+ return EmptyIdentifierString();
+}
+
ZonePtrList<const Parser::NamedImport>* Parser::ParseNamedImports(int pos) {
// NamedImports :
// '{' '}'
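ParseExportSpecifierName accepts either an IdentifierName or, for ModuleExportName, a string literal that must be well-formed UTF-16 (no unpaired surrogates). A sketch of the source forms involved; the specifier and bindings are hypothetical:

    const foo = 1;
    export { foo as "not an identifier" };          // string literal as the export name
    export { "default" as alias } from './m.mjs';   // string local name: 'from' clause required
    // export { "x" };                              // SyntaxError -- a string literal local name
    //                                              // needs a following 'from' clause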
@@ -1135,12 +1170,13 @@ ZonePtrList<const Parser::NamedImport>* Parser::ParseNamedImports(int pos) {
// ImportSpecifier :
// BindingIdentifier
// IdentifierName 'as' BindingIdentifier
+ // ModuleExportName 'as' BindingIdentifier
Expect(Token::LBRACE);
auto result = zone()->New<ZonePtrList<const NamedImport>>(1, zone());
while (peek() != Token::RBRACE) {
- const AstRawString* import_name = ParsePropertyName();
+ const AstRawString* import_name = ParseExportSpecifierName();
const AstRawString* local_name = import_name;
Scanner::Location location = scanner()->location();
// In the presence of 'as', the left-side of the 'as' can
@@ -1174,10 +1210,80 @@ ZonePtrList<const Parser::NamedImport>* Parser::ParseNamedImports(int pos) {
return result;
}
+Parser::ImportAssertions* Parser::ParseImportAssertClause() {
+ // AssertClause :
+ // assert '{' '}'
+ // assert '{' AssertEntries '}'
+
+ // AssertEntries :
+ // IdentifierName: AssertionKey
+ // IdentifierName: AssertionKey , AssertEntries
+
+ // AssertionKey :
+ // IdentifierName
+ // StringLiteral
+
+ auto import_assertions = zone()->New<ImportAssertions>(zone());
+
+ if (!FLAG_harmony_import_assertions) {
+ return import_assertions;
+ }
+
+ // Assert clause is optional, and cannot be preceded by a LineTerminator.
+ if (scanner()->HasLineTerminatorBeforeNext() ||
+ !CheckContextualKeyword(ast_value_factory()->assert_string())) {
+ return import_assertions;
+ }
+
+ Expect(Token::LBRACE);
+
+ while (peek() != Token::RBRACE) {
+ const AstRawString* attribute_key = nullptr;
+ if (Check(Token::STRING)) {
+ attribute_key = GetSymbol();
+ } else {
+ attribute_key = ParsePropertyName();
+ }
+
+ Scanner::Location location = scanner()->location();
+
+ Expect(Token::COLON);
+ Expect(Token::STRING);
+
+ const AstRawString* attribute_value = GetSymbol();
+
+ // Set the location to the whole "key: 'value'" string, so that it makes
+ // sense both for errors due to the key and errors due to the value.
+ location.end_pos = scanner()->location().end_pos;
+
+ auto result = import_assertions->insert(std::make_pair(
+ attribute_key, std::make_pair(attribute_value, location)));
+ if (!result.second) {
+ // It is a syntax error if two AssertEntries have the same key.
+ ReportMessageAt(location, MessageTemplate::kImportAssertionDuplicateKey,
+ attribute_key);
+ break;
+ }
+
+ if (peek() == Token::RBRACE) break;
+ if (V8_UNLIKELY(!Check(Token::COMMA))) {
+ ReportUnexpectedToken(Next());
+ break;
+ }
+ }
+
+ Expect(Token::RBRACE);
+
+ return import_assertions;
+}
+
void Parser::ParseImportDeclaration() {
// ImportDeclaration :
// 'import' ImportClause 'from' ModuleSpecifier ';'
// 'import' ModuleSpecifier ';'
+ // 'import' ImportClause 'from' ModuleSpecifier [no LineTerminator here]
+ // AssertClause ';'
+ // 'import' ModuleSpecifier [no LineTerminator here] AssertClause ';'
//
// ImportClause :
// ImportedDefaultBinding
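ParseImportAssertClause only does work behind --harmony-import-assertions, requires that no line terminator precede 'assert', and rejects duplicate keys. A sketch of the accepted shape; the specifier is hypothetical:

    import data from './config.json' assert { type: 'json' };
    export * from './config.json' assert { type: 'json' };
    // No line terminator may come between the specifier and 'assert' (otherwise the
    // clause is not attached), and a repeated key such as
    // assert { type: 'json', type: 'json' } is reported as a duplicate-key error.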
@@ -1198,8 +1304,10 @@ void Parser::ParseImportDeclaration() {
if (tok == Token::STRING) {
Scanner::Location specifier_loc = scanner()->peek_location();
const AstRawString* module_specifier = ParseModuleSpecifier();
+ const ImportAssertions* import_assertions = ParseImportAssertClause();
ExpectSemicolon();
- module()->AddEmptyImport(module_specifier, specifier_loc);
+ module()->AddEmptyImport(module_specifier, import_assertions, specifier_loc,
+ zone());
return;
}
@@ -1242,6 +1350,7 @@ void Parser::ParseImportDeclaration() {
ExpectContextualKeyword(ast_value_factory()->from_string());
Scanner::Location specifier_loc = scanner()->peek_location();
const AstRawString* module_specifier = ParseModuleSpecifier();
+ const ImportAssertions* import_assertions = ParseImportAssertClause();
ExpectSemicolon();
// Now that we have all the information, we can make the appropriate
@@ -1254,24 +1363,26 @@ void Parser::ParseImportDeclaration() {
if (module_namespace_binding != nullptr) {
module()->AddStarImport(module_namespace_binding, module_specifier,
- module_namespace_binding_loc, specifier_loc,
- zone());
+ import_assertions, module_namespace_binding_loc,
+ specifier_loc, zone());
}
if (import_default_binding != nullptr) {
module()->AddImport(ast_value_factory()->default_string(),
import_default_binding, module_specifier,
- import_default_binding_loc, specifier_loc, zone());
+ import_assertions, import_default_binding_loc,
+ specifier_loc, zone());
}
if (named_imports != nullptr) {
if (named_imports->length() == 0) {
- module()->AddEmptyImport(module_specifier, specifier_loc);
+ module()->AddEmptyImport(module_specifier, import_assertions,
+ specifier_loc, zone());
} else {
for (const NamedImport* import : *named_imports) {
module()->AddImport(import->import_name, import->local_name,
- module_specifier, import->location, specifier_loc,
- zone());
+ module_specifier, import_assertions,
+ import->location, specifier_loc, zone());
}
}
}
@@ -1354,18 +1465,18 @@ void Parser::ParseExportStar() {
int pos = position();
Consume(Token::MUL);
- if (!FLAG_harmony_namespace_exports ||
- !PeekContextualKeyword(ast_value_factory()->as_string())) {
+ if (!PeekContextualKeyword(ast_value_factory()->as_string())) {
// 'export' '*' 'from' ModuleSpecifier ';'
Scanner::Location loc = scanner()->location();
ExpectContextualKeyword(ast_value_factory()->from_string());
Scanner::Location specifier_loc = scanner()->peek_location();
const AstRawString* module_specifier = ParseModuleSpecifier();
+ const ImportAssertions* import_assertions = ParseImportAssertClause();
ExpectSemicolon();
- module()->AddStarExport(module_specifier, loc, specifier_loc, zone());
+ module()->AddStarExport(module_specifier, import_assertions, loc,
+ specifier_loc, zone());
return;
}
- if (!FLAG_harmony_namespace_exports) return;
// 'export' '*' 'as' IdentifierName 'from' ModuleSpecifier ';'
//
@@ -1373,9 +1484,14 @@ void Parser::ParseExportStar() {
// export * as x from "...";
// ~>
// import * as .x from "..."; export {.x as x};
+ //
+ // Note that the desugared internal namespace export name (.x above) will
+ // never conflict with a string literal export name, as literal string export
+ // names in local name positions (i.e. left of 'as' or in a clause without
+ // 'as') are disallowed without a following 'from' clause.
ExpectContextualKeyword(ast_value_factory()->as_string());
- const AstRawString* export_name = ParsePropertyName();
+ const AstRawString* export_name = ParseExportSpecifierName();
Scanner::Location export_name_loc = scanner()->location();
const AstRawString* local_name = NextInternalNamespaceExportName();
Scanner::Location local_name_loc = Scanner::Location::invalid();
@@ -1385,21 +1501,34 @@ void Parser::ParseExportStar() {
ExpectContextualKeyword(ast_value_factory()->from_string());
Scanner::Location specifier_loc = scanner()->peek_location();
const AstRawString* module_specifier = ParseModuleSpecifier();
+ const ImportAssertions* import_assertions = ParseImportAssertClause();
ExpectSemicolon();
- module()->AddStarImport(local_name, module_specifier, local_name_loc,
- specifier_loc, zone());
+ module()->AddStarImport(local_name, module_specifier, import_assertions,
+ local_name_loc, specifier_loc, zone());
module()->AddExport(local_name, export_name, export_name_loc, zone());
}
Statement* Parser::ParseExportDeclaration() {
// ExportDeclaration:
// 'export' '*' 'from' ModuleSpecifier ';'
+ // 'export' '*' 'from' ModuleSpecifier [no LineTerminator here]
+ // AssertClause ';'
// 'export' '*' 'as' IdentifierName 'from' ModuleSpecifier ';'
+ // 'export' '*' 'as' IdentifierName 'from' ModuleSpecifier
+ // [no LineTerminator here] AssertClause ';'
+ // 'export' '*' 'as' ModuleExportName 'from' ModuleSpecifier ';'
+ // 'export' '*' 'as' ModuleExportName 'from' ModuleSpecifier
+ // [no LineTerminator here] AssertClause ';'
// 'export' ExportClause ('from' ModuleSpecifier)? ';'
+ // 'export' ExportClause ('from' ModuleSpecifier [no LineTerminator here]
+ // AssertClause)? ';'
// 'export' VariableStatement
// 'export' Declaration
// 'export' 'default' ... (handled in ParseExportDefault)
+ //
+ // ModuleExportName :
+ // StringLiteral
Expect(Token::EXPORT);
Statement* result = nullptr;
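A sketch of the 'export * as' forms handled here, matching the desugaring described in the comment above; the specifier is hypothetical:

    export * as ns from './m.mjs';
    // handled roughly as the comment describes:
    //   import * as .ns from './m.mjs';   // '.ns' is an internal name, not writable in source
    //   export { .ns as ns };
    export * as "ns name" from './m.mjs';  // a ModuleExportName is accepted here as well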
@@ -1426,30 +1555,41 @@ Statement* Parser::ParseExportDeclaration() {
// encountered, and then throw a SyntaxError if we are in the
// non-FromClause case.
Scanner::Location reserved_loc = Scanner::Location::invalid();
+ Scanner::Location string_literal_local_name_loc =
+ Scanner::Location::invalid();
ZoneChunkList<ExportClauseData>* export_data =
- ParseExportClause(&reserved_loc);
- const AstRawString* module_specifier = nullptr;
- Scanner::Location specifier_loc;
+ ParseExportClause(&reserved_loc, &string_literal_local_name_loc);
if (CheckContextualKeyword(ast_value_factory()->from_string())) {
- specifier_loc = scanner()->peek_location();
- module_specifier = ParseModuleSpecifier();
- } else if (reserved_loc.IsValid()) {
- // No FromClause, so reserved words are invalid in ExportClause.
- ReportMessageAt(reserved_loc, MessageTemplate::kUnexpectedReserved);
- return nullptr;
- }
- ExpectSemicolon();
- if (module_specifier == nullptr) {
- for (const ExportClauseData& data : *export_data) {
- module()->AddExport(data.local_name, data.export_name, data.location,
- zone());
+ Scanner::Location specifier_loc = scanner()->peek_location();
+ const AstRawString* module_specifier = ParseModuleSpecifier();
+ const ImportAssertions* import_assertions = ParseImportAssertClause();
+ ExpectSemicolon();
+
+ if (export_data->is_empty()) {
+ module()->AddEmptyImport(module_specifier, import_assertions,
+ specifier_loc, zone());
+ } else {
+ for (const ExportClauseData& data : *export_data) {
+ module()->AddExport(data.local_name, data.export_name,
+ module_specifier, import_assertions,
+ data.location, specifier_loc, zone());
+ }
}
- } else if (export_data->is_empty()) {
- module()->AddEmptyImport(module_specifier, specifier_loc);
} else {
+ if (reserved_loc.IsValid()) {
+ // No FromClause, so reserved words are invalid in ExportClause.
+ ReportMessageAt(reserved_loc, MessageTemplate::kUnexpectedReserved);
+ return nullptr;
+ } else if (string_literal_local_name_loc.IsValid()) {
+ ReportMessageAt(string_literal_local_name_loc,
+ MessageTemplate::kModuleExportNameWithoutFromClause);
+ return nullptr;
+ }
+
+ ExpectSemicolon();
+
for (const ExportClauseData& data : *export_data) {
- module()->AddExport(data.local_name, data.export_name,
- module_specifier, data.location, specifier_loc,
+ module()->AddExport(data.local_name, data.export_name, data.location,
zone());
}
}
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 8897030a0c..073f517b56 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -269,7 +269,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Scanner::Location location;
};
ZoneChunkList<ExportClauseData>* ParseExportClause(
- Scanner::Location* reserved_loc);
+ Scanner::Location* reserved_loc,
+ Scanner::Location* string_literal_local_name_loc);
struct NamedImport : public ZoneObject {
const AstRawString* import_name;
const AstRawString* local_name;
@@ -280,7 +281,12 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
local_name(local_name),
location(location) {}
};
+ const AstRawString* ParseExportSpecifierName();
ZonePtrList<const NamedImport>* ParseNamedImports(int pos);
+ using ImportAssertions =
+ ZoneMap<const AstRawString*,
+ std::pair<const AstRawString*, Scanner::Location>>;
+ ImportAssertions* ParseImportAssertClause();
Statement* BuildInitializationBlock(DeclarationParsingResult* parsing_result);
Expression* RewriteReturn(Expression* return_value, int pos);
Statement* RewriteSwitchStatement(SwitchStatement* switch_statement,
@@ -997,14 +1003,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
node, zone()->New<IterationStatementSourceRanges>(body_range));
}
- // Used to record source ranges of expressions associated with optional chain:
- V8_INLINE void RecordExpressionSourceRange(Expression* node,
- const SourceRange& right_range) {
- if (source_range_map_ == nullptr) return;
- source_range_map_->Insert(node,
- zone()->New<ExpressionSourceRanges>(right_range));
- }
-
V8_INLINE void RecordSuspendSourceRange(Expression* node,
int32_t continuation_position) {
if (source_range_map_ == nullptr) return;
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 942acf13f8..36461ee762 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -246,23 +246,40 @@ void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
is_set_ = true;
Visit(node->finally_block());
node->set_finally_block(replacement_->AsBlock());
- // Save .result value at the beginning of the finally block and restore it
- // at the end again: ".backup = .result; ...; .result = .backup"
- // This is necessary because the finally block does not normally contribute
- // to the completion value.
CHECK_NOT_NULL(closure_scope());
- Variable* backup = closure_scope()->NewTemporary(
- factory()->ast_value_factory()->dot_result_string());
- Expression* backup_proxy = factory()->NewVariableProxy(backup);
- Expression* result_proxy = factory()->NewVariableProxy(result_);
- Expression* save = factory()->NewAssignment(
- Token::ASSIGN, backup_proxy, result_proxy, kNoSourcePosition);
- Expression* restore = factory()->NewAssignment(
- Token::ASSIGN, result_proxy, backup_proxy, kNoSourcePosition);
- node->finally_block()->statements()->InsertAt(
- 0, factory()->NewExpressionStatement(save, kNoSourcePosition), zone());
- node->finally_block()->statements()->Add(
- factory()->NewExpressionStatement(restore, kNoSourcePosition), zone());
+ if (is_set_) {
+ // Save .result value at the beginning of the finally block and restore it
+ // at the end again: ".backup = .result; ...; .result = .backup" This is
+ // necessary because the finally block does not normally contribute to the
+ // completion value.
+ Variable* backup = closure_scope()->NewTemporary(
+ factory()->ast_value_factory()->dot_result_string());
+ Expression* backup_proxy = factory()->NewVariableProxy(backup);
+ Expression* result_proxy = factory()->NewVariableProxy(result_);
+ Expression* save = factory()->NewAssignment(
+ Token::ASSIGN, backup_proxy, result_proxy, kNoSourcePosition);
+ Expression* restore = factory()->NewAssignment(
+ Token::ASSIGN, result_proxy, backup_proxy, kNoSourcePosition);
+ node->finally_block()->statements()->InsertAt(
+ 0, factory()->NewExpressionStatement(save, kNoSourcePosition),
+ zone());
+ node->finally_block()->statements()->Add(
+ factory()->NewExpressionStatement(restore, kNoSourcePosition),
+ zone());
+ } else {
+ // If is_set_ is false, it means the finally block has a 'break' or a
+ // 'continue' and was not preceded by a statement that assigned to
+ // .result. Try-finally statements return the abrupt completions from the
+ // finally block, meaning this case should get an undefined.
+ //
+ // Since the finally block will definitely result in an abrupt completion,
+ // there's no need to save and restore the .result.
+ Expression* undef = factory()->NewUndefinedLiteral(kNoSourcePosition);
+ Expression* assignment = SetResult(undef);
+ node->finally_block()->statements()->InsertAt(
+ 0, factory()->NewExpressionStatement(assignment, kNoSourcePosition),
+ zone());
+ }
// We can't tell whether the finally-block is guaranteed to set .result, so
// reset is_set_ before visiting the try-block.
is_set_ = false;
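A sketch of the completion-value behaviour the two branches implement, observable through eval's script completion value (not part of the patch):

    eval('try { 1; } finally { 2; }');                    // 1 -- a normal finally does not contribute
    eval('outer: try { 1; } finally { break outer; }');   // undefined -- the abrupt finally wins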
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 1414b3490b..dde90d910f 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -331,7 +331,7 @@ class UnbufferedCharacterStream : public Utf16CharacterStream {
// Provides a unbuffered utf-16 view on the bytes from the underlying
// ByteStream.
-class RelocatingCharacterStream
+class RelocatingCharacterStream final
: public UnbufferedCharacterStream<OnHeapStream> {
public:
template <class... TArgs>
@@ -422,7 +422,7 @@ bool BufferedUtf16CharacterStream::ReadBlock() {
// TODO(verwaest): Decode utf8 chunks into utf16 chunks on the blink side
// instead so we don't need to buffer.
-class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
+class Utf8ExternalStreamingStream final : public BufferedUtf16CharacterStream {
public:
Utf8ExternalStreamingStream(
ScriptCompiler::ExternalSourceStream* source_stream)
diff --git a/deps/v8/src/profiler/DIR_METADATA b/deps/v8/src/profiler/DIR_METADATA
new file mode 100644
index 0000000000..3ba1106a5f
--- /dev/null
+++ b/deps/v8/src/profiler/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Platform>DevTools>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/profiler/OWNERS b/deps/v8/src/profiler/OWNERS
index 001abef49d..28a7353ef4 100644
--- a/deps/v8/src/profiler/OWNERS
+++ b/deps/v8/src/profiler/OWNERS
@@ -2,5 +2,3 @@ alph@chromium.org
petermarshall@chromium.org
per-file *heap*=ulan@chromium.org
-
-# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 3b72ef818a..6ee7539dda 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -18,6 +18,7 @@
#include "src/logging/log.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/profiler/profiler-stats.h"
+#include "src/profiler/symbolizer.h"
#include "src/utils/locked-queue-inl.h"
#include "src/wasm/wasm-engine.h"
@@ -96,10 +97,10 @@ ProfilingScope::~ProfilingScope() {
}
ProfilerEventsProcessor::ProfilerEventsProcessor(
- Isolate* isolate, ProfileGenerator* generator,
+ Isolate* isolate, Symbolizer* symbolizer,
ProfilerCodeObserver* code_observer)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
- generator_(generator),
+ symbolizer_(symbolizer),
code_observer_(code_observer),
last_code_event_id_(0),
last_processed_code_event_id_(0),
@@ -109,11 +110,12 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(
}
SamplingEventsProcessor::SamplingEventsProcessor(
- Isolate* isolate, ProfileGenerator* generator,
- ProfilerCodeObserver* code_observer, base::TimeDelta period,
- bool use_precise_sampling)
- : ProfilerEventsProcessor(isolate, generator, code_observer),
+ Isolate* isolate, Symbolizer* symbolizer,
+ ProfilerCodeObserver* code_observer, CpuProfilesCollection* profiles,
+ base::TimeDelta period, bool use_precise_sampling)
+ : ProfilerEventsProcessor(isolate, symbolizer, code_observer),
sampler_(new CpuSampler(isolate, this)),
+ profiles_(profiles),
period_(period),
use_precise_sampling_(use_precise_sampling) {
sampler_->Start();
@@ -209,6 +211,15 @@ void ProfilerEventsProcessor::CodeEventHandler(
}
}
+void SamplingEventsProcessor::SymbolizeAndAddToProfiles(
+ const TickSampleEventRecord* record) {
+ Symbolizer::SymbolizedSample symbolized =
+ symbolizer_->SymbolizeTickSample(record->sample);
+ profiles_->AddPathToCurrentProfiles(
+ record->sample.timestamp, symbolized.stack_trace, symbolized.src_line,
+ record->sample.update_stats, record->sample.sampling_interval);
+}
+
ProfilerEventsProcessor::SampleProcessingResult
SamplingEventsProcessor::ProcessOneSample() {
TickSampleEventRecord record1;
@@ -216,7 +227,7 @@ SamplingEventsProcessor::ProcessOneSample() {
(record1.order == last_processed_code_event_id_)) {
TickSampleEventRecord record;
ticks_from_vm_buffer_.Dequeue(&record);
- generator_->SymbolizeTickSample(record.sample);
+ SymbolizeAndAddToProfiles(&record);
return OneSampleProcessed;
}
@@ -228,7 +239,7 @@ SamplingEventsProcessor::ProcessOneSample() {
if (record->order != last_processed_code_event_id_) {
return FoundSampleForNextCodeEvent;
}
- generator_->SymbolizeTickSample(record->sample);
+ SymbolizeAndAddToProfiles(record);
ticks_buffer_.Remove();
return OneSampleProcessed;
}
@@ -315,6 +326,8 @@ ProfilerCodeObserver::ProfilerCodeObserver(Isolate* isolate)
LogBuiltins();
}
+void ProfilerCodeObserver::ClearCodeMap() { code_map_.Clear(); }
+
void ProfilerCodeObserver::CodeEventHandler(
const CodeEventsContainer& evt_rec) {
if (processor_) {
@@ -437,7 +450,7 @@ CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
CpuProfilingLoggingMode logging_mode,
CpuProfilesCollection* test_profiles,
- ProfileGenerator* test_generator,
+ Symbolizer* test_symbolizer,
ProfilerEventsProcessor* test_processor)
: isolate_(isolate),
naming_mode_(naming_mode),
@@ -445,7 +458,7 @@ CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
base_sampling_interval_(base::TimeDelta::FromMicroseconds(
FLAG_cpu_profiler_sampling_interval)),
profiles_(test_profiles),
- generator_(test_generator),
+ symbolizer_(test_symbolizer),
processor_(test_processor),
code_observer_(isolate),
is_profiling_(false) {
@@ -475,8 +488,11 @@ void CpuProfiler::set_use_precise_sampling(bool value) {
void CpuProfiler::ResetProfiles() {
profiles_.reset(new CpuProfilesCollection(isolate_));
profiles_->set_cpu_profiler(this);
- generator_.reset();
- if (!profiling_scope_) profiler_listener_.reset();
+ symbolizer_.reset();
+ if (!profiling_scope_) {
+ profiler_listener_.reset();
+ code_observer_.ClearCodeMap();
+ }
}
void CpuProfiler::EnableLogging() {
@@ -519,17 +535,25 @@ void CpuProfiler::CollectSample() {
}
}
-void CpuProfiler::StartProfiling(const char* title,
- CpuProfilingOptions options) {
- if (profiles_->StartProfiling(title, options)) {
+CpuProfilingStatus CpuProfiler::StartProfiling(const char* title,
+ CpuProfilingOptions options) {
+ StartProfilingStatus status = profiles_->StartProfiling(title, options);
+
+ // TODO(nicodubus): Revisit logic for if we want to do anything different for
+ // kAlreadyStarted
+ if (status == CpuProfilingStatus::kStarted ||
+ status == CpuProfilingStatus::kAlreadyStarted) {
TRACE_EVENT0("v8", "CpuProfiler::StartProfiling");
AdjustSamplingInterval();
StartProcessorIfNotStarted();
}
+
+ return status;
}
-void CpuProfiler::StartProfiling(String title, CpuProfilingOptions options) {
- StartProfiling(profiles_->GetName(title), options);
+CpuProfilingStatus CpuProfiler::StartProfiling(String title,
+ CpuProfilingOptions options) {
+ return StartProfiling(profiles_->GetName(title), options);
}
void CpuProfiler::StartProcessorIfNotStarted() {
@@ -543,15 +567,14 @@ void CpuProfiler::StartProcessorIfNotStarted() {
EnableLogging();
}
- if (!generator_) {
- generator_.reset(
- new ProfileGenerator(profiles_.get(), code_observer_.code_map()));
+ if (!symbolizer_) {
+ symbolizer_ = std::make_unique<Symbolizer>(code_observer_.code_map());
}
base::TimeDelta sampling_interval = ComputeSamplingInterval();
- processor_.reset(
- new SamplingEventsProcessor(isolate_, generator_.get(), &code_observer_,
- sampling_interval, use_precise_sampling_));
+ processor_.reset(new SamplingEventsProcessor(
+ isolate_, symbolizer_.get(), &code_observer_, profiles_.get(),
+ sampling_interval, use_precise_sampling_));
is_profiling_ = true;
// Enable stack sampling.
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index e8d977424b..e7ca3fbd7b 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -27,7 +27,7 @@ class CodeEntry;
class CodeMap;
class CpuProfilesCollection;
class Isolate;
-class ProfileGenerator;
+class Symbolizer;
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
@@ -165,7 +165,7 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
virtual void SetSamplingInterval(base::TimeDelta) {}
protected:
- ProfilerEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
+ ProfilerEventsProcessor(Isolate* isolate, Symbolizer* symbolizer,
ProfilerCodeObserver* code_observer);
// Called from events processing thread (Run() method.)
@@ -178,7 +178,7 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
};
virtual SampleProcessingResult ProcessOneSample() = 0;
- ProfileGenerator* generator_;
+ Symbolizer* symbolizer_;
ProfilerCodeObserver* code_observer_;
std::atomic_bool running_{true};
base::ConditionVariable running_cond_;
@@ -193,8 +193,9 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
class V8_EXPORT_PRIVATE SamplingEventsProcessor
: public ProfilerEventsProcessor {
public:
- SamplingEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
+ SamplingEventsProcessor(Isolate* isolate, Symbolizer* symbolizer,
ProfilerCodeObserver* code_observer,
+ CpuProfilesCollection* profiles,
base::TimeDelta period, bool use_precise_sampling);
~SamplingEventsProcessor() override;
@@ -221,6 +222,7 @@ class V8_EXPORT_PRIVATE SamplingEventsProcessor
private:
SampleProcessingResult ProcessOneSample() override;
+ void SymbolizeAndAddToProfiles(const TickSampleEventRecord* record);
static const size_t kTickSampleBufferSize = 512 * KB;
static const size_t kTickSampleQueueLength =
@@ -228,6 +230,7 @@ class V8_EXPORT_PRIVATE SamplingEventsProcessor
SamplingCircularQueue<TickSampleEventRecord,
kTickSampleQueueLength> ticks_buffer_;
std::unique_ptr<sampler::Sampler> sampler_;
+ CpuProfilesCollection* profiles_;
base::TimeDelta period_; // Samples & code events processing period.
const bool use_precise_sampling_; // Whether or not busy-waiting is used for
// low sampling intervals on Windows.
@@ -243,6 +246,7 @@ class V8_EXPORT_PRIVATE ProfilerCodeObserver : public CodeEventObserver {
void CodeEventHandler(const CodeEventsContainer& evt_rec) override;
CodeMap* code_map() { return &code_map_; }
+ void ClearCodeMap();
private:
friend class ProfilerEventsProcessor;
@@ -294,7 +298,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
CpuProfilingLoggingMode logging_mode,
- CpuProfilesCollection* profiles, ProfileGenerator* test_generator,
+ CpuProfilesCollection* profiles, Symbolizer* test_symbolizer,
ProfilerEventsProcessor* test_processor);
~CpuProfiler();
@@ -304,13 +308,16 @@ class V8_EXPORT_PRIVATE CpuProfiler {
using ProfilingMode = v8::CpuProfilingMode;
using NamingMode = v8::CpuProfilingNamingMode;
using LoggingMode = v8::CpuProfilingLoggingMode;
+ using StartProfilingStatus = CpuProfilingStatus;
base::TimeDelta sampling_interval() const { return base_sampling_interval_; }
void set_sampling_interval(base::TimeDelta value);
void set_use_precise_sampling(bool);
void CollectSample();
- void StartProfiling(const char* title, CpuProfilingOptions options = {});
- void StartProfiling(String title, CpuProfilingOptions options = {});
+ StartProfilingStatus StartProfiling(const char* title,
+ CpuProfilingOptions options = {});
+ StartProfilingStatus StartProfiling(String title,
+ CpuProfilingOptions options = {});
CpuProfile* StopProfiling(const char* title);
CpuProfile* StopProfiling(String title);
@@ -321,13 +328,14 @@ class V8_EXPORT_PRIVATE CpuProfiler {
bool is_profiling() const { return is_profiling_; }
- ProfileGenerator* generator() const { return generator_.get(); }
+ Symbolizer* symbolizer() const { return symbolizer_.get(); }
ProfilerEventsProcessor* processor() const { return processor_.get(); }
Isolate* isolate() const { return isolate_; }
ProfilerListener* profiler_listener_for_test() const {
return profiler_listener_.get();
}
+ CodeMap* code_map_for_test() { return code_observer_.code_map(); }
private:
void StartProcessorIfNotStarted();
@@ -352,7 +360,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
// to a multiple of, or used as the default if unspecified.
base::TimeDelta base_sampling_interval_;
std::unique_ptr<CpuProfilesCollection> profiles_;
- std::unique_ptr<ProfileGenerator> generator_;
+ std::unique_ptr<Symbolizer> symbolizer_;
std::unique_ptr<ProfilerEventsProcessor> processor_;
std::unique_ptr<ProfilerListener> profiler_listener_;
std::unique_ptr<ProfilingScope> profiling_scope_;
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index fc9bd00f47..f742b7e1cc 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -64,6 +64,19 @@ void HeapProfiler::BuildEmbedderGraph(Isolate* isolate,
}
}
+void HeapProfiler::SetGetDetachednessCallback(
+ v8::HeapProfiler::GetDetachednessCallback callback, void* data) {
+ get_detachedness_callback_ = {callback, data};
+}
+
+v8::EmbedderGraph::Node::Detachedness HeapProfiler::GetDetachedness(
+ const v8::Local<v8::Value> v8_value, uint16_t class_id) {
+ DCHECK(HasGetDetachednessCallback());
+ return get_detachedness_callback_.first(
+ reinterpret_cast<v8::Isolate*>(heap()->isolate()), v8_value, class_id,
+ get_detachedness_callback_.second);
+}
+
HeapSnapshot* HeapProfiler::TakeSnapshot(
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver,
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index 21d9bb8fcf..67fd1e5bd8 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -72,6 +72,14 @@ class HeapProfiler : public HeapObjectAllocationTracker {
return !build_embedder_graph_callbacks_.empty();
}
+ void SetGetDetachednessCallback(
+ v8::HeapProfiler::GetDetachednessCallback callback, void* data);
+ bool HasGetDetachednessCallback() const {
+ return get_detachedness_callback_.first != nullptr;
+ }
+ v8::EmbedderGraph::Node::Detachedness GetDetachedness(
+ const v8::Local<v8::Value> v8_value, uint16_t class_id);
+
bool is_tracking_object_moves() const { return is_tracking_object_moves_; }
Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
@@ -99,6 +107,8 @@ class HeapProfiler : public HeapObjectAllocationTracker {
std::unique_ptr<SamplingHeapProfiler> sampling_heap_profiler_;
std::vector<std::pair<v8::HeapProfiler::BuildEmbedderGraphCallback, void*>>
build_embedder_graph_callbacks_;
+ std::pair<v8::HeapProfiler::GetDetachednessCallback, void*>
+ get_detachedness_callback_;
DISALLOW_COPY_AND_ASSIGN(HeapProfiler);
};
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 0c5af20b01..2907a215c6 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -7,6 +7,7 @@
#include <utility>
#include "src/api/api-inl.h"
+#include "src/base/optional.h"
#include "src/codegen/assembler-inl.h"
#include "src/common/globals.h"
#include "src/debug/debug.h"
@@ -103,8 +104,8 @@ void HeapEntry::SetNamedAutoIndexReference(HeapGraphEdge::Type type,
SetNamedReference(type, name, child);
}
-void HeapEntry::Print(
- const char* prefix, const char* edge_name, int max_depth, int indent) {
+void HeapEntry::Print(const char* prefix, const char* edge_name, int max_depth,
+ int indent) const {
STATIC_ASSERT(sizeof(unsigned) == sizeof(id()));
base::OS::Print("%6zu @%6u %*c %s%s: ", self_size(), id(), indent, ' ',
prefix, edge_name);
@@ -162,7 +163,7 @@ void HeapEntry::Print(
}
}
-const char* HeapEntry::TypeAsString() {
+const char* HeapEntry::TypeAsString() const {
switch (type()) {
case kHidden: return "/hidden/";
case kObject: return "/object/";
@@ -578,9 +579,9 @@ void V8HeapExplorer::ExtractLocationForJSFunction(HeapEntry* entry,
Script script = Script::cast(func.shared().script());
int scriptId = script.id();
int start = func.shared().StartPosition();
- int line = script.GetLineNumber(start);
- int col = script.GetColumnNumber(start);
- snapshot_->AddLocation(entry, scriptId, line, col);
+ Script::PositionInfo info;
+ script.GetPositionInfo(start, &info, Script::WITH_OFFSET);
+ snapshot_->AddLocation(entry, scriptId, info.line, info.column);
}
HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
@@ -598,8 +599,8 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
const char* name = names_->GetName(
GetConstructorName(JSObject::cast(object)));
if (object.IsJSGlobalObject()) {
- auto it = objects_tags_.find(JSGlobalObject::cast(object));
- if (it != objects_tags_.end()) {
+ auto it = global_object_tag_map_.find(JSGlobalObject::cast(object));
+ if (it != global_object_tag_map_.end()) {
name = names_->GetFormatted("%s / %s", name, it->second);
}
}
@@ -1066,14 +1067,15 @@ void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map map) {
Map::kTransitionsOrPrototypeInfoOffset);
}
}
- DescriptorArray descriptors = map.instance_descriptors();
+ DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
TagObject(descriptors, "(map descriptors)");
SetInternalReference(entry, "descriptors", descriptors,
Map::kInstanceDescriptorsOffset);
SetInternalReference(entry, "prototype", map.prototype(),
Map::kPrototypeOffset);
if (FLAG_unbox_double_fields) {
- SetInternalReference(entry, "layout_descriptor", map.layout_descriptor(),
+ SetInternalReference(entry, "layout_descriptor",
+ map.layout_descriptor(kAcquireLoad),
Map::kLayoutDescriptorOffset);
}
if (map.IsContextMap()) {
@@ -1115,15 +1117,17 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
CodeKindToString(shared.GetCode().kind())));
}
- if (shared.name_or_scope_info().IsScopeInfo()) {
- TagObject(shared.name_or_scope_info(), "(function scope info)");
+ Object name_or_scope_info = shared.name_or_scope_info(kAcquireLoad);
+ if (name_or_scope_info.IsScopeInfo()) {
+ TagObject(name_or_scope_info, "(function scope info)");
}
- SetInternalReference(entry, "name_or_scope_info", shared.name_or_scope_info(),
+ SetInternalReference(entry, "name_or_scope_info", name_or_scope_info,
SharedFunctionInfo::kNameOrScopeInfoOffset);
SetInternalReference(entry, "script_or_debug_info",
- shared.script_or_debug_info(),
+ shared.script_or_debug_info(kAcquireLoad),
SharedFunctionInfo::kScriptOrDebugInfoOffset);
- SetInternalReference(entry, "function_data", shared.function_data(),
+ SetInternalReference(entry, "function_data",
+ shared.function_data(kAcquireLoad),
SharedFunctionInfo::kFunctionDataOffset);
SetInternalReference(
entry, "raw_outer_scope_info_or_feedback_metadata",
@@ -1277,11 +1281,11 @@ void V8HeapExplorer::ExtractFixedArrayReferences(HeapEntry* entry,
void V8HeapExplorer::ExtractFeedbackVectorReferences(
HeapEntry* entry, FeedbackVector feedback_vector) {
- MaybeObject code = feedback_vector.optimized_code_weak_or_smi();
+ MaybeObject code = feedback_vector.maybe_optimized_code();
HeapObject code_heap_object;
if (code->GetHeapObjectIfWeak(&code_heap_object)) {
SetWeakReference(entry, "optimized code", code_heap_object,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset);
+ FeedbackVector::kMaybeOptimizedCodeOffset);
}
}
@@ -1324,7 +1328,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
HeapEntry* entry) {
Isolate* isolate = js_obj.GetIsolate();
if (js_obj.HasFastProperties()) {
- DescriptorArray descs = js_obj.map().instance_descriptors();
+ DescriptorArray descs = js_obj.map().instance_descriptors(kRelaxedLoad);
for (InternalIndex i : js_obj.map().IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
switch (details.location()) {
@@ -1477,7 +1481,7 @@ class RootsReferencesExtractor : public RootVisitor {
OffHeapObjectSlot start,
OffHeapObjectSlot end) override {
DCHECK_EQ(root, Root::kStringTable);
- const Isolate* isolate = Isolate::FromHeap(explorer_->heap_);
+ IsolateRoot isolate = Isolate::FromHeap(explorer_->heap_);
for (OffHeapObjectSlot p = start; p < end; ++p) {
explorer_->SetGcSubrootReference(root, description, visiting_weak_roots_,
p.load(isolate));
@@ -1819,22 +1823,26 @@ class GlobalObjectsEnumerator : public RootVisitor {
// Modifies heap. Must not be run during heap traversal.
-void V8HeapExplorer::TagGlobalObjects() {
+void V8HeapExplorer::CollectGlobalObjectsTags() {
+ if (!global_object_name_resolver_) return;
+
Isolate* isolate = Isolate::FromHeap(heap_);
- HandleScope scope(isolate);
GlobalObjectsEnumerator enumerator(isolate);
isolate->global_handles()->IterateAllRoots(&enumerator);
- std::vector<const char*> urls(enumerator.count());
for (int i = 0, l = enumerator.count(); i < l; ++i) {
- urls[i] = global_object_name_resolver_
- ? global_object_name_resolver_->GetName(Utils::ToLocal(
- Handle<JSObject>::cast(enumerator.at(i))))
- : nullptr;
+ Handle<JSGlobalObject> obj = enumerator.at(i);
+ const char* tag = global_object_name_resolver_->GetName(
+ Utils::ToLocal(Handle<JSObject>::cast(obj)));
+ if (tag) {
+ global_object_tag_pairs_.emplace_back(obj, tag);
+ }
}
+}
- DisallowHeapAllocation no_allocation;
- for (int i = 0, l = enumerator.count(); i < l; ++i) {
- if (urls[i]) objects_tags_.emplace(*enumerator.at(i), urls[i]);
+void V8HeapExplorer::MakeGlobalObjectTagMap(
+ const SafepointScope& safepoint_scope) {
+ for (const auto& pair : global_object_tag_pairs_) {
+ global_object_tag_map_.emplace(*pair.first, pair.second);
}
}
@@ -2077,19 +2085,16 @@ class NullContextForSnapshotScope {
} // namespace
bool HeapSnapshotGenerator::GenerateSnapshot() {
- v8_heap_explorer_.TagGlobalObjects();
+ Isolate* isolate = Isolate::FromHeap(heap_);
+ base::Optional<HandleScope> handle_scope(base::in_place, isolate);
+ v8_heap_explorer_.CollectGlobalObjectsTags();
- // TODO(1562) Profiler assumes that any object that is in the heap after
- // full GC is reachable from the root when computing dominators.
- // This is not true for weakly reachable objects.
- // As a temporary solution we call GC twice.
- heap_->PreciseCollectAllGarbage(Heap::kNoGCFlags,
- GarbageCollectionReason::kHeapProfiler);
- heap_->PreciseCollectAllGarbage(Heap::kNoGCFlags,
- GarbageCollectionReason::kHeapProfiler);
+ heap_->CollectAllAvailableGarbage(GarbageCollectionReason::kHeapProfiler);
- NullContextForSnapshotScope null_context_scope(Isolate::FromHeap(heap_));
+ NullContextForSnapshotScope null_context_scope(isolate);
SafepointScope scope(heap_);
+ v8_heap_explorer_.MakeGlobalObjectTagMap(scope);
+ handle_scope.reset();
#ifdef VERIFY_HEAP
Heap* debug_heap = heap_;
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index fcf253ea35..df95787f96 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -38,6 +38,7 @@ class JSGlobalObject;
class JSGlobalProxy;
class JSPromise;
class JSWeakCollection;
+class SafepointScope;
struct SourceLocation {
SourceLocation(int entry_index, int scriptId, int line, int col)
@@ -151,12 +152,12 @@ class HeapEntry {
StringsStorage* strings);
V8_EXPORT_PRIVATE void Print(const char* prefix, const char* edge_name,
- int max_depth, int indent);
+ int max_depth, int indent) const;
private:
V8_INLINE std::vector<HeapGraphEdge*>::iterator children_begin() const;
V8_INLINE std::vector<HeapGraphEdge*>::iterator children_end() const;
- const char* TypeAsString();
+ const char* TypeAsString() const;
unsigned type_: 4;
unsigned index_ : 28; // Supports up to ~250M objects.
@@ -196,7 +197,9 @@ class HeapSnapshot {
return gc_subroot_entries_[static_cast<int>(root)];
}
std::deque<HeapEntry>& entries() { return entries_; }
+ const std::deque<HeapEntry>& entries() const { return entries_; }
std::deque<HeapGraphEdge>& edges() { return edges_; }
+ const std::deque<HeapGraphEdge>& edges() const { return edges_; }
std::vector<HeapGraphEdge*>& children() { return children_; }
const std::vector<SourceLocation>& locations() const { return locations_; }
void RememberLastJSObjectId();
@@ -338,7 +341,8 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
HeapEntry* AllocateEntry(HeapThing ptr) override;
int EstimateObjectsCount();
bool IterateAndExtractReferences(HeapSnapshotGenerator* generator);
- void TagGlobalObjects();
+ void CollectGlobalObjectsTags();
+ void MakeGlobalObjectTagMap(const SafepointScope& safepoint_scope);
void TagBuiltinCodeObject(Code code, const char* name);
HeapEntry* AddEntry(Address address,
HeapEntry::Type type,
@@ -445,7 +449,10 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
HeapObjectsMap* heap_object_map_;
SnapshottingProgressReportingInterface* progress_;
HeapSnapshotGenerator* generator_ = nullptr;
- std::unordered_map<JSGlobalObject, const char*, Object::Hasher> objects_tags_;
+ std::vector<std::pair<Handle<JSGlobalObject>, const char*>>
+ global_object_tag_pairs_;
+ std::unordered_map<JSGlobalObject, const char*, Object::Hasher>
+ global_object_tag_map_;
std::unordered_map<Object, const char*, Object::Hasher>
strong_gc_subroot_names_;
std::unordered_set<JSGlobalObject, Object::Hasher> user_roots_;
diff --git a/deps/v8/src/profiler/profile-generator-inl.h b/deps/v8/src/profiler/profile-generator-inl.h
index 3abacb7b61..8239bdb000 100644
--- a/deps/v8/src/profiler/profile-generator-inl.h
+++ b/deps/v8/src/profiler/profile-generator-inl.h
@@ -28,13 +28,6 @@ CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
position_(0),
line_info_(std::move(line_info)) {}
-inline CodeEntry* ProfileGenerator::FindEntry(Address address,
- Address* out_instruction_start) {
- CodeEntry* entry = code_map_->FindEntry(address, out_instruction_start);
- if (entry) entry->mark_used();
- return entry;
-}
-
ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry,
ProfileNode* parent, int line_number)
: tree_(tree),
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index cf448fcd20..f3344c57a0 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -403,11 +403,11 @@ ProfileNode* ProfileTree::AddPathFromEnd(const ProfileStackTrace& path,
CodeEntry* last_entry = nullptr;
int parent_line_number = v8::CpuProfileNode::kNoLineNumberInfo;
for (auto it = path.rbegin(); it != path.rend(); ++it) {
- if (it->entry.code_entry == nullptr) continue;
- last_entry = (*it).entry.code_entry;
- node = node->FindOrAddChild((*it).entry.code_entry, parent_line_number);
+ if (it->code_entry == nullptr) continue;
+ last_entry = it->code_entry;
+ node = node->FindOrAddChild(it->code_entry, parent_line_number);
parent_line_number = mode == ProfilingMode::kCallerLineNumbers
- ? (*it).entry.line_number
+ ? it->line_number
: v8::CpuProfileNode::kNoLineNumberInfo;
}
if (last_entry && last_entry->has_deopt_info()) {
@@ -644,7 +644,9 @@ void CpuProfile::Print() const {
CodeMap::CodeMap() = default;
-CodeMap::~CodeMap() {
+CodeMap::~CodeMap() { Clear(); }
+
+void CodeMap::Clear() {
// First clean the free list as it's otherwise impossible to tell
// the slot type.
unsigned free_slot = free_list_head_;
@@ -654,6 +656,10 @@ CodeMap::~CodeMap() {
free_slot = next_slot;
}
for (auto slot : code_entries_) delete slot.entry;
+
+ code_entries_.clear();
+ code_map_.clear();
+ free_list_head_ = kNoFreeSlot;
}
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
@@ -727,24 +733,26 @@ void CodeMap::Print() {
CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
: profiler_(nullptr), current_profiles_semaphore_(1) {}
-bool CpuProfilesCollection::StartProfiling(const char* title,
- CpuProfilingOptions options) {
+CpuProfilingStatus CpuProfilesCollection::StartProfiling(
+ const char* title, CpuProfilingOptions options) {
current_profiles_semaphore_.Wait();
+
if (static_cast<int>(current_profiles_.size()) >= kMaxSimultaneousProfiles) {
current_profiles_semaphore_.Signal();
- return false;
+
+ return CpuProfilingStatus::kErrorTooManyProfilers;
}
for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
if (strcmp(profile->title(), title) == 0) {
// Ignore attempts to start profile with the same title...
current_profiles_semaphore_.Signal();
- // ... though return true to force it collect a sample.
- return true;
+ // ... though return kAlreadyStarted to force it to collect a sample.
+ return CpuProfilingStatus::kAlreadyStarted;
}
}
current_profiles_.emplace_back(new CpuProfile(profiler_, title, options));
current_profiles_semaphore_.Signal();
- return true;
+ return CpuProfilingStatus::kStarted;
}
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
@@ -769,7 +777,6 @@ CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
return profile;
}
-
bool CpuProfilesCollection::IsLastProfile(const char* title) {
// Called from VM thread, and only it can mutate the list,
// so no locking is needed here.
@@ -833,172 +840,5 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
current_profiles_semaphore_.Signal();
}
-ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles,
- CodeMap* code_map)
- : profiles_(profiles), code_map_(code_map) {}
-
-void ProfileGenerator::SymbolizeTickSample(const TickSample& sample) {
- ProfileStackTrace stack_trace;
- // Conservatively reserve space for stack frames + pc + function + vm-state.
- // There could in fact be more of them because of inlined entries.
- stack_trace.reserve(sample.frames_count + 3);
-
- // The ProfileNode knows nothing about all versions of generated code for
- // the same JS function. The line number information associated with
- // the latest version of generated code is used to find a source line number
- // for a JS function. Then, the detected source line is passed to
- // ProfileNode to increase the tick count for this source line.
- const int no_line_info = v8::CpuProfileNode::kNoLineNumberInfo;
- int src_line = no_line_info;
- bool src_line_not_found = true;
-
- if (sample.pc != nullptr) {
- if (sample.has_external_callback && sample.state == EXTERNAL) {
- // Don't use PC when in external callback code, as it can point
- // inside a callback's code, and we will erroneously report
- // that a callback calls itself.
- stack_trace.push_back({{FindEntry(reinterpret_cast<Address>(
- sample.external_callback_entry)),
- no_line_info}});
- } else {
- Address attributed_pc = reinterpret_cast<Address>(sample.pc);
- Address pc_entry_instruction_start = kNullAddress;
- CodeEntry* pc_entry =
- FindEntry(attributed_pc, &pc_entry_instruction_start);
- // If there is no pc_entry, we're likely in native code. Find out if the
- // top of the stack (the return address) was pointing inside a JS
- // function, meaning that we have encountered a frameless invocation.
- if (!pc_entry && !sample.has_external_callback) {
- attributed_pc = reinterpret_cast<Address>(sample.tos);
- pc_entry = FindEntry(attributed_pc, &pc_entry_instruction_start);
- }
- // If pc is in the function code before it set up stack frame or after the
- // frame was destroyed, SafeStackFrameIterator incorrectly thinks that
- // ebp contains the return address of the current function and skips the
- // caller's frame. Check for this case and just skip such samples.
- if (pc_entry) {
- int pc_offset =
- static_cast<int>(attributed_pc - pc_entry_instruction_start);
- // TODO(petermarshall): pc_offset can still be negative in some cases.
- src_line = pc_entry->GetSourceLine(pc_offset);
- if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
- src_line = pc_entry->line_number();
- }
- src_line_not_found = false;
- stack_trace.push_back({{pc_entry, src_line}});
-
- if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
- pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
- // When current function is either the Function.prototype.apply or the
- // Function.prototype.call builtin the top frame is either frame of
- // the calling JS function or internal frame.
- // In the latter case we know the caller for sure but in the
- // former case we don't so we simply replace the frame with
- // 'unresolved' entry.
- if (!sample.has_external_callback) {
- ProfilerStats::Instance()->AddReason(
- ProfilerStats::Reason::kInCallOrApply);
- stack_trace.push_back(
- {{CodeEntry::unresolved_entry(), no_line_info}});
- }
- }
- }
- }
-
- for (unsigned i = 0; i < sample.frames_count; ++i) {
- Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
- Address instruction_start = kNullAddress;
- CodeEntry* entry = FindEntry(stack_pos, &instruction_start);
- int line_number = no_line_info;
- if (entry) {
- // Find out if the entry has an inlining stack associated.
- int pc_offset = static_cast<int>(stack_pos - instruction_start);
- // TODO(petermarshall): pc_offset can still be negative in some cases.
- const std::vector<CodeEntryAndLineNumber>* inline_stack =
- entry->GetInlineStack(pc_offset);
- if (inline_stack) {
- int most_inlined_frame_line_number = entry->GetSourceLine(pc_offset);
- for (auto entry : *inline_stack) {
- stack_trace.push_back({entry});
- }
-
- // This is a bit of a messy hack. The line number for the most-inlined
- // frame (the function at the end of the chain of function calls) has
- // the wrong line number in inline_stack. The actual line number in
- // this function is stored in the SourcePositionTable in entry. We fix
- // up the line number for the most-inlined frame here.
- // TODO(petermarshall): Remove this and use a tree with a node per
- // inlining_id.
- DCHECK(!inline_stack->empty());
- size_t index = stack_trace.size() - inline_stack->size();
- stack_trace[index].entry.line_number = most_inlined_frame_line_number;
- }
- // Skip unresolved frames (e.g. internal frame) and get source line of
- // the first JS caller.
- if (src_line_not_found) {
- src_line = entry->GetSourceLine(pc_offset);
- if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
- src_line = entry->line_number();
- }
- src_line_not_found = false;
- }
- line_number = entry->GetSourceLine(pc_offset);
-
- // The inline stack contains the top-level function i.e. the same
- // function as entry. We don't want to add it twice. The one from the
- // inline stack has the correct line number for this particular inlining
- // so we use it instead of pushing entry to stack_trace.
- if (inline_stack) continue;
- }
- stack_trace.push_back({{entry, line_number}});
- }
- }
-
- if (FLAG_prof_browser_mode) {
- bool no_symbolized_entries = true;
- for (auto e : stack_trace) {
- if (e.entry.code_entry != nullptr) {
- no_symbolized_entries = false;
- break;
- }
- }
- // If no frames were symbolized, put the VM state entry in.
- if (no_symbolized_entries) {
- if (sample.pc == nullptr) {
- ProfilerStats::Instance()->AddReason(ProfilerStats::Reason::kNullPC);
- } else {
- ProfilerStats::Instance()->AddReason(
- ProfilerStats::Reason::kNoSymbolizedFrames);
- }
- stack_trace.push_back({{EntryForVMState(sample.state), no_line_info}});
- }
- }
-
- profiles_->AddPathToCurrentProfiles(sample.timestamp, stack_trace, src_line,
- sample.update_stats,
- sample.sampling_interval);
-}
-
-CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
- switch (tag) {
- case GC:
- return CodeEntry::gc_entry();
- case JS:
- case PARSER:
- case COMPILER:
- case BYTECODE_COMPILER:
- case ATOMICS_WAIT:
- // DOM events handlers are reported as OTHER / EXTERNAL entries.
- // To avoid confusing people, let's put all these entries into
- // one bucket.
- case OTHER:
- case EXTERNAL:
- return CodeEntry::program_entry();
- case IDLE:
- return CodeEntry::idle_entry();
- }
- UNREACHABLE();
-}
-
} // namespace internal
} // namespace v8
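Editor's note: with StartProfiling now returning a CpuProfilingStatus instead of a bool, callers can distinguish the three outcomes visible in the hunk above. A small sketch of how a caller might branch on the status; the enum is redeclared locally so the snippet stands on its own and is not a quote of the V8 header.

// Editor's sketch (not part of the diff): branching on the richer status.
#include <cstdio>

enum class CpuProfilingStatus { kStarted, kAlreadyStarted, kErrorTooManyProfilers };

void ReportStart(CpuProfilingStatus status) {
  switch (status) {
    case CpuProfilingStatus::kStarted:
      std::puts("profiling started");
      break;
    case CpuProfilingStatus::kAlreadyStarted:
      // A profile with this title is already running; a sample is still
      // forced, matching the old `return true` behavior.
      std::puts("already started");
      break;
    case CpuProfilingStatus::kErrorTooManyProfilers:
      // The old API collapsed this case into `false`; now the caller can tell
      // the user why the profile did not start.
      std::puts("too many simultaneous profiles");
      break;
  }
}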
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 0852ee7802..9183d56d42 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -228,11 +228,7 @@ struct CodeEntryAndLineNumber {
int line_number;
};
-struct ProfileStackFrame {
- CodeEntryAndLineNumber entry;
-};
-
-typedef std::vector<ProfileStackFrame> ProfileStackTrace;
+using ProfileStackTrace = std::vector<CodeEntryAndLineNumber>;
class ProfileTree;
@@ -416,6 +412,8 @@ class V8_EXPORT_PRIVATE CodeMap {
CodeEntry* FindEntry(Address addr, Address* out_instruction_start = nullptr);
void Print();
+ void Clear();
+
private:
struct CodeEntryMapInfo {
unsigned index;
@@ -435,6 +433,7 @@ class V8_EXPORT_PRIVATE CodeMap {
CodeEntry* entry(unsigned index) { return code_entries_[index].entry; }
+ // Added state here needs to be dealt with in Clear() as well.
std::deque<CodeEntrySlotInfo> code_entries_;
std::map<Address, CodeEntryMapInfo> code_map_;
unsigned free_list_head_ = kNoFreeSlot;
@@ -447,7 +446,8 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
explicit CpuProfilesCollection(Isolate* isolate);
void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
- bool StartProfiling(const char* title, CpuProfilingOptions options = {});
+ CpuProfilingStatus StartProfiling(const char* title,
+ CpuProfilingOptions options = {});
CpuProfile* StopProfiling(const char* title);
std::vector<std::unique_ptr<CpuProfile>>* profiles() {
@@ -483,28 +483,6 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
};
-class V8_EXPORT_PRIVATE ProfileGenerator {
- public:
- explicit ProfileGenerator(CpuProfilesCollection* profiles, CodeMap* code_map);
-
- // Use the CodeMap to turn the raw addresses recorded in the sample into
- // code/function names. The symbolized stack is added to the relevant
- // profiles in the CpuProfilesCollection.
- void SymbolizeTickSample(const TickSample& sample);
-
- CodeMap* code_map() { return code_map_; }
-
- private:
- CodeEntry* FindEntry(Address address,
- Address* out_instruction_start = nullptr);
- CodeEntry* EntryForVMState(StateTag tag);
-
- CpuProfilesCollection* profiles_;
- CodeMap* const code_map_;
-
- DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
-};
-
} // namespace internal
} // namespace v8
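Editor's note: dropping the ProfileStackFrame wrapper makes ProfileStackTrace a plain vector of CodeEntryAndLineNumber, which is why the double-brace initializers in profile-generator.cc and the new symbolizer.cc use single braces. A minimal sketch with locally stubbed types, assuming nothing from the V8 headers.

// Editor's sketch (not part of the diff): building a trace without the
// removed wrapper struct.
#include <vector>

struct CodeEntry;  // opaque in this sketch
struct CodeEntryAndLineNumber { CodeEntry* code_entry; int line_number; };
using ProfileStackTrace = std::vector<CodeEntryAndLineNumber>;

ProfileStackTrace MakeTrace(CodeEntry* entry, int line) {
  ProfileStackTrace trace;
  trace.push_back({entry, line});  // was: trace.push_back({{entry, line}});
  return trace;
}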
diff --git a/deps/v8/src/profiler/symbolizer.cc b/deps/v8/src/profiler/symbolizer.cc
new file mode 100644
index 0000000000..ca6eb269fa
--- /dev/null
+++ b/deps/v8/src/profiler/symbolizer.cc
@@ -0,0 +1,190 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/symbolizer.h"
+
+#include "src/execution/vm-state.h"
+#include "src/profiler/profile-generator.h"
+#include "src/profiler/profiler-stats.h"
+#include "src/profiler/tick-sample.h"
+
+namespace v8 {
+namespace internal {
+
+Symbolizer::Symbolizer(CodeMap* code_map) : code_map_(code_map) {}
+
+CodeEntry* Symbolizer::FindEntry(Address address,
+ Address* out_instruction_start) {
+ CodeEntry* entry = code_map_->FindEntry(address, out_instruction_start);
+ if (entry) entry->mark_used();
+ return entry;
+}
+
+namespace {
+
+CodeEntry* EntryForVMState(StateTag tag) {
+ switch (tag) {
+ case GC:
+ return CodeEntry::gc_entry();
+ case JS:
+ case PARSER:
+ case COMPILER:
+ case BYTECODE_COMPILER:
+ case ATOMICS_WAIT:
+ // DOM events handlers are reported as OTHER / EXTERNAL entries.
+ // To avoid confusing people, let's put all these entries into
+ // one bucket.
+ case OTHER:
+ case EXTERNAL:
+ return CodeEntry::program_entry();
+ case IDLE:
+ return CodeEntry::idle_entry();
+ }
+}
+
+} // namespace
+
+Symbolizer::SymbolizedSample Symbolizer::SymbolizeTickSample(
+ const TickSample& sample) {
+ ProfileStackTrace stack_trace;
+ // Conservatively reserve space for stack frames + pc + function + vm-state.
+ // There could in fact be more of them because of inlined entries.
+ stack_trace.reserve(sample.frames_count + 3);
+
+ // The ProfileNode knows nothing about all versions of generated code for
+ // the same JS function. The line number information associated with
+ // the latest version of generated code is used to find a source line number
+ // for a JS function. Then, the detected source line is passed to
+ // ProfileNode to increase the tick count for this source line.
+ const int no_line_info = v8::CpuProfileNode::kNoLineNumberInfo;
+ int src_line = no_line_info;
+ bool src_line_not_found = true;
+
+ if (sample.pc != nullptr) {
+ if (sample.has_external_callback && sample.state == EXTERNAL) {
+ // Don't use PC when in external callback code, as it can point
+ // inside a callback's code, and we will erroneously report
+ // that a callback calls itself.
+ stack_trace.push_back(
+ {FindEntry(reinterpret_cast<Address>(sample.external_callback_entry)),
+ no_line_info});
+ } else {
+ Address attributed_pc = reinterpret_cast<Address>(sample.pc);
+ Address pc_entry_instruction_start = kNullAddress;
+ CodeEntry* pc_entry =
+ FindEntry(attributed_pc, &pc_entry_instruction_start);
+ // If there is no pc_entry, we're likely in native code. Find out if the
+ // top of the stack (the return address) was pointing inside a JS
+ // function, meaning that we have encountered a frameless invocation.
+ if (!pc_entry && !sample.has_external_callback) {
+ attributed_pc = reinterpret_cast<Address>(sample.tos);
+ pc_entry = FindEntry(attributed_pc, &pc_entry_instruction_start);
+ }
+ // If pc is in the function code before it set up stack frame or after the
+ // frame was destroyed, SafeStackFrameIterator incorrectly thinks that
+ // ebp contains the return address of the current function and skips the
+ // caller's frame. Check for this case and just skip such samples.
+ if (pc_entry) {
+ int pc_offset =
+ static_cast<int>(attributed_pc - pc_entry_instruction_start);
+ // TODO(petermarshall): pc_offset can still be negative in some cases.
+ src_line = pc_entry->GetSourceLine(pc_offset);
+ if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
+ src_line = pc_entry->line_number();
+ }
+ src_line_not_found = false;
+ stack_trace.push_back({pc_entry, src_line});
+
+ if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
+ pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
+ // When current function is either the Function.prototype.apply or the
+ // Function.prototype.call builtin the top frame is either frame of
+ // the calling JS function or internal frame.
+ // In the latter case we know the caller for sure but in the
+ // former case we don't so we simply replace the frame with
+ // 'unresolved' entry.
+ if (!sample.has_external_callback) {
+ ProfilerStats::Instance()->AddReason(
+ ProfilerStats::Reason::kInCallOrApply);
+ stack_trace.push_back(
+ {CodeEntry::unresolved_entry(), no_line_info});
+ }
+ }
+ }
+ }
+
+ for (unsigned i = 0; i < sample.frames_count; ++i) {
+ Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
+ Address instruction_start = kNullAddress;
+ CodeEntry* entry = FindEntry(stack_pos, &instruction_start);
+ int line_number = no_line_info;
+ if (entry) {
+ // Find out if the entry has an inlining stack associated.
+ int pc_offset = static_cast<int>(stack_pos - instruction_start);
+ // TODO(petermarshall): pc_offset can still be negative in some cases.
+ const std::vector<CodeEntryAndLineNumber>* inline_stack =
+ entry->GetInlineStack(pc_offset);
+ if (inline_stack) {
+ int most_inlined_frame_line_number = entry->GetSourceLine(pc_offset);
+ for (auto entry : *inline_stack) {
+ stack_trace.push_back(entry);
+ }
+
+ // This is a bit of a messy hack. The line number for the most-inlined
+ // frame (the function at the end of the chain of function calls) has
+ // the wrong line number in inline_stack. The actual line number in
+ // this function is stored in the SourcePositionTable in entry. We fix
+ // up the line number for the most-inlined frame here.
+ // TODO(petermarshall): Remove this and use a tree with a node per
+ // inlining_id.
+ DCHECK(!inline_stack->empty());
+ size_t index = stack_trace.size() - inline_stack->size();
+ stack_trace[index].line_number = most_inlined_frame_line_number;
+ }
+ // Skip unresolved frames (e.g. internal frame) and get source line of
+ // the first JS caller.
+ if (src_line_not_found) {
+ src_line = entry->GetSourceLine(pc_offset);
+ if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
+ src_line = entry->line_number();
+ }
+ src_line_not_found = false;
+ }
+ line_number = entry->GetSourceLine(pc_offset);
+
+ // The inline stack contains the top-level function i.e. the same
+ // function as entry. We don't want to add it twice. The one from the
+ // inline stack has the correct line number for this particular inlining
+ // so we use it instead of pushing entry to stack_trace.
+ if (inline_stack) continue;
+ }
+ stack_trace.push_back({entry, line_number});
+ }
+ }
+
+ if (FLAG_prof_browser_mode) {
+ bool no_symbolized_entries = true;
+ for (auto e : stack_trace) {
+ if (e.code_entry != nullptr) {
+ no_symbolized_entries = false;
+ break;
+ }
+ }
+ // If no frames were symbolized, put the VM state entry in.
+ if (no_symbolized_entries) {
+ if (sample.pc == nullptr) {
+ ProfilerStats::Instance()->AddReason(ProfilerStats::Reason::kNullPC);
+ } else {
+ ProfilerStats::Instance()->AddReason(
+ ProfilerStats::Reason::kNoSymbolizedFrames);
+ }
+ stack_trace.push_back({EntryForVMState(sample.state), no_line_info});
+ }
+ }
+
+ return SymbolizedSample{stack_trace, src_line};
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/profiler/symbolizer.h b/deps/v8/src/profiler/symbolizer.h
new file mode 100644
index 0000000000..f18339e7f1
--- /dev/null
+++ b/deps/v8/src/profiler/symbolizer.h
@@ -0,0 +1,44 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_SYMBOLIZER_H_
+#define V8_PROFILER_SYMBOLIZER_H_
+
+#include "src/base/macros.h"
+#include "src/profiler/profile-generator.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeEntry;
+class CodeMap;
+
+class V8_EXPORT_PRIVATE Symbolizer {
+ public:
+ explicit Symbolizer(CodeMap* code_map);
+
+ struct SymbolizedSample {
+ ProfileStackTrace stack_trace;
+ int src_line;
+ };
+
+ // Use the CodeMap to turn the raw addresses recorded in the sample into
+ // code/function names.
+ SymbolizedSample SymbolizeTickSample(const TickSample& sample);
+
+ CodeMap* code_map() { return code_map_; }
+
+ private:
+ CodeEntry* FindEntry(Address address,
+ Address* out_instruction_start = nullptr);
+
+ CodeMap* const code_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(Symbolizer);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROFILER_SYMBOLIZER_H_
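Editor's note: the new Symbolizer takes over the address-to-CodeEntry half of the removed ProfileGenerator, while recording stays with CpuProfilesCollection. The sketch below is a plausible shape for the new call site, built only from the declarations above and the AddPathToCurrentProfiles call that the deleted SymbolizeTickSample used to make; it assumes the V8-internal headers and is illustrative, not a quote of the actual caller (which lies outside this diff).

// Editor's sketch (not part of the diff): symbolize first, then record.
void OnTickSample(Symbolizer* symbolizer, CpuProfilesCollection* profiles,
                  const TickSample& sample) {
  Symbolizer::SymbolizedSample symbolized =
      symbolizer->SymbolizeTickSample(sample);
  profiles->AddPathToCurrentProfiles(sample.timestamp, symbolized.stack_trace,
                                     symbolized.src_line, sample.update_stats,
                                     sample.sampling_interval);
}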
diff --git a/deps/v8/src/regexp/DIR_METADATA b/deps/v8/src/regexp/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/regexp/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/regexp/OWNERS b/deps/v8/src/regexp/OWNERS
index 250c8c6b88..3322bb9505 100644
--- a/deps/v8/src/regexp/OWNERS
+++ b/deps/v8/src/regexp/OWNERS
@@ -1,4 +1,2 @@
jgruber@chromium.org
yangguo@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 78b586e265..48e8fae663 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -127,6 +127,7 @@ RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {
exit_label_.Unuse();
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
+ fallback_label_.Unuse();
}
@@ -164,8 +165,13 @@ void RegExpMacroAssemblerARM::Backtrack() {
__ cmp(r0, Operand(backtrack_limit()));
__ b(ne, &next);
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+ // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -901,6 +907,12 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ jmp(&return_r0);
}
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ mov(r0, Operand(FALLBACK_TO_EXPERIMENTAL));
+ __ jmp(&return_r0);
+ }
+
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
Handle<Code> code =
@@ -1072,7 +1084,6 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState() {
__ mov(ip, Operand(stack_guard_check));
EmbeddedData d = EmbeddedData::FromBlob();
- CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
__ mov(lr, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
__ Call(lr);
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index 910e5c4607..92cac644e5 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -203,6 +203,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
+ Label fallback_label_;
};
} // namespace internal
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index ac33f8631f..32fed3703b 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -142,6 +142,7 @@ RegExpMacroAssemblerARM64::~RegExpMacroAssemblerARM64() {
exit_label_.Unuse();
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
+ fallback_label_.Unuse();
}
int RegExpMacroAssemblerARM64::stack_limit_slack() {
@@ -201,8 +202,13 @@ void RegExpMacroAssemblerARM64::Backtrack() {
__ Cmp(scratch, Operand(backtrack_limit()));
__ B(ne, &next);
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ B(&fallback_label_);
+ } else {
+ // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -1094,6 +1100,12 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ B(&return_w0);
}
+ if (fallback_label_.is_linked()) {
+ __ Bind(&fallback_label_);
+ __ Mov(w0, FALLBACK_TO_EXPERIMENTAL);
+ __ B(&return_w0);
+ }
+
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
Handle<Code> code =
@@ -1399,7 +1411,6 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
Register scratch = temps.AcquireX();
EmbeddedData d = EmbeddedData::FromBlob();
- CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
__ Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index aeb49aa9ff..6d60271a43 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -279,6 +279,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
+ Label fallback_label_;
};
} // namespace internal
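Editor's note: both the ARM and ARM64 assemblers now route an exceeded backtrack limit through fallback_label_ when can_fallback() is set, and GetCode binds that label to return FALLBACK_TO_EXPERIMENTAL so the caller can rerun the regexp on the experimental engine. Reduced to plain C++ with local stand-in result codes (their values here are arbitrary and not V8's):

// Editor's sketch (not part of the diff): the decision the generated code now
// encodes at the backtrack limit. The enumerators are local stand-ins.
enum Result { kFailure, kFallbackToExperimental };

Result OnBacktrackLimitExceeded(bool can_fallback) {
  // With fallback available, ask the caller to retry on the experimental
  // (non-backtracking) engine; otherwise keep the old behavior and treat the
  // exceeded limit as a failed match.
  return can_fallback ? kFallbackToExperimental : kFailure;
}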
diff --git a/deps/v8/src/regexp/experimental/experimental-bytecode.h b/deps/v8/src/regexp/experimental/experimental-bytecode.h
index 3cb65828c5..4e9bc9396b 100644
--- a/deps/v8/src/regexp/experimental/experimental-bytecode.h
+++ b/deps/v8/src/regexp/experimental/experimental-bytecode.h
@@ -106,21 +106,21 @@ struct RegExpInstruction {
uc16 max; // Inclusive.
};
- static RegExpInstruction ConsumeRange(Uc16Range consume_range) {
+ static RegExpInstruction ConsumeRange(uc16 min, uc16 max) {
RegExpInstruction result;
result.opcode = CONSUME_RANGE;
- result.payload.consume_range = consume_range;
+ result.payload.consume_range = Uc16Range{min, max};
return result;
}
static RegExpInstruction ConsumeAnyChar() {
- return ConsumeRange(Uc16Range{0x0000, 0xFFFF});
+ return ConsumeRange(0x0000, 0xFFFF);
}
static RegExpInstruction Fail() {
// This is encoded as the empty CONSUME_RANGE of characters 0xFFFF <= c <=
// 0x0000.
- return ConsumeRange(Uc16Range{0xFFFF, 0x0000});
+ return ConsumeRange(0xFFFF, 0x0000);
}
static RegExpInstruction Fork(int32_t alt_index) {
diff --git a/deps/v8/src/regexp/experimental/experimental-compiler.cc b/deps/v8/src/regexp/experimental/experimental-compiler.cc
index 615f7566f4..4d53c2c0c5 100644
--- a/deps/v8/src/regexp/experimental/experimental-compiler.cc
+++ b/deps/v8/src/regexp/experimental/experimental-compiler.cc
@@ -35,7 +35,7 @@ class CanBeHandledVisitor final : private RegExpVisitor {
// future.
static constexpr JSRegExp::Flags kAllowedFlags =
JSRegExp::kGlobal | JSRegExp::kSticky | JSRegExp::kMultiline |
- JSRegExp::kDotAll;
+ JSRegExp::kDotAll | JSRegExp::kLinear;
// We support Unicode iff kUnicode is among the supported flags.
STATIC_ASSERT(ExperimentalRegExp::kSupportsUnicode ==
((kAllowedFlags & JSRegExp::kUnicode) != 0));
@@ -177,94 +177,120 @@ class CanBeHandledVisitor final : private RegExpVisitor {
bool ExperimentalRegExpCompiler::CanBeHandled(RegExpTree* tree,
JSRegExp::Flags flags,
int capture_count) {
- DCHECK(FLAG_enable_experimental_regexp_engine);
return CanBeHandledVisitor::Check(tree, flags, capture_count);
}
namespace {
-// A label in bytecode with known address.
-class Label {
+// A label in bytecode which starts with no known address. The address *must*
+// be bound with `Bind` before the label goes out of scope.
+// Implemented as a linked list through the `payload.pc` of FORK and JMP
+// instructions.
+struct Label {
public:
- explicit Label(int index) : index_(index) { DCHECK_GE(index_, 0); }
-
- int index() { return index_; }
-
- // Friend functions because `label.AddForkTo(code, zone)` reads like we're
- // adding code to where `label` is defined, but we're adding a fork with
- // target `label` at the end of `code`.
- friend void AddForkTo(Label target, ZoneList<RegExpInstruction>& code,
- Zone* zone) {
- code.Add(RegExpInstruction::Fork(target.index_), zone);
+ Label() = default;
+ ~Label() {
+ DCHECK_EQ(state_, BOUND);
+ DCHECK_GE(bound_index_, 0);
}
- friend void AddJmpTo(Label target, ZoneList<RegExpInstruction>& code,
- Zone* zone) {
- code.Add(RegExpInstruction::Jmp(target.index_), zone);
- }
+ // Don't copy, don't move. Moving could be implemented, but it's not
+ // needed anywhere.
+ Label(const Label&) = delete;
+ Label& operator=(const Label&) = delete;
private:
- int index_;
+ friend class BytecodeAssembler;
+
+ // UNBOUND implies unbound_patch_list_begin_.
+ // BOUND implies bound_index_.
+ enum { UNBOUND, BOUND } state_ = UNBOUND;
+ union {
+ int unbound_patch_list_begin_ = -1;
+ int bound_index_;
+ };
};
-// A label in bytecode whose address is not known yet. The address *must* be
-// `Bind` before the deferred label object goes out of scope, and the deferred
-// label object *must not* be used after it was defined. (Use the `Label`
-// object returned by `Bind` instead.)
-struct DeferredLabel {
- // Implemented as a linked list through the `payload.pc` of FORK and JMP
- // instructions.
+class BytecodeAssembler {
public:
- DeferredLabel() = default;
- ~DeferredLabel() { DCHECK_EQ(patch_list_begin_, kLabelWasDefined); }
+ // TODO(mbid,v8:10765): Use some upper bound for code_ capacity computed from
+ // the `tree` size we're going to compile?
+ explicit BytecodeAssembler(Zone* zone) : zone_(zone), code_(0, zone) {}
+
+ ZoneList<RegExpInstruction> IntoCode() && { return std::move(code_); }
+
+ void Accept() { code_.Add(RegExpInstruction::Accept(), zone_); }
+
+ void Assertion(RegExpAssertion::AssertionType t) {
+ code_.Add(RegExpInstruction::Assertion(t), zone_);
+ }
+
+ void ClearRegister(int32_t register_index) {
+ code_.Add(RegExpInstruction::ClearRegister(register_index), zone_);
+ }
+
+ void ConsumeRange(uc16 from, uc16 to) {
+ code_.Add(RegExpInstruction::ConsumeRange(from, to), zone_);
+ }
+
+ void ConsumeAnyChar() {
+ code_.Add(RegExpInstruction::ConsumeAnyChar(), zone_);
+ }
+
+ void Fork(Label& target) {
+ LabelledInstrImpl(RegExpInstruction::Opcode::FORK, target);
+ }
- friend void AddForkTo(DeferredLabel& target,
- ZoneList<RegExpInstruction>& code, Zone* zone) {
- DCHECK_NE(target.patch_list_begin_, DeferredLabel::kLabelWasDefined);
- int new_list_begin = code.length();
- DCHECK_GE(new_list_begin, 0);
- code.Add(RegExpInstruction::Fork(target.patch_list_begin_), zone);
- target.patch_list_begin_ = new_list_begin;
+ void Jmp(Label& target) {
+ LabelledInstrImpl(RegExpInstruction::Opcode::JMP, target);
}
- friend void AddJmpTo(DeferredLabel& target, ZoneList<RegExpInstruction>& code,
- Zone* zone) {
- DCHECK_NE(target.patch_list_begin_, DeferredLabel::kLabelWasDefined);
- int new_list_begin = code.length();
- DCHECK_GE(new_list_begin, 0);
- code.Add(RegExpInstruction::Jmp(target.patch_list_begin_), zone);
- target.patch_list_begin_ = new_list_begin;
+ void SetRegisterToCp(int32_t register_index) {
+ code_.Add(RegExpInstruction::SetRegisterToCp(register_index), zone_);
}
- // Define the deferred label as referring to the next instruction that will
- // be pushed to `code`. Consumes the DeferredLabel object and returns a
- // Label object.
- Label Bind(ZoneList<RegExpInstruction>& code) && {
- DCHECK_NE(patch_list_begin_, kLabelWasDefined);
+ void Bind(Label& target) {
+ DCHECK_EQ(target.state_, Label::UNBOUND);
- int index = code.length();
+ int index = code_.length();
- while (patch_list_begin_ != kEmptyList) {
- RegExpInstruction& inst = code[patch_list_begin_];
+ while (target.unbound_patch_list_begin_ != -1) {
+ RegExpInstruction& inst = code_[target.unbound_patch_list_begin_];
DCHECK(inst.opcode == RegExpInstruction::FORK ||
inst.opcode == RegExpInstruction::JMP);
- patch_list_begin_ = inst.payload.pc;
+ target.unbound_patch_list_begin_ = inst.payload.pc;
inst.payload.pc = index;
}
- patch_list_begin_ = kLabelWasDefined;
- return Label(index);
+ target.state_ = Label::BOUND;
+ target.bound_index_ = index;
}
+ void Fail() { code_.Add(RegExpInstruction::Fail(), zone_); }
+
private:
- static constexpr int kEmptyList = -1;
- static constexpr int kLabelWasDefined = -2;
- int patch_list_begin_ = kEmptyList;
+ void LabelledInstrImpl(RegExpInstruction::Opcode op, Label& target) {
+ RegExpInstruction result;
+ result.opcode = op;
- // Don't copy, don't move. Moving could be implemented, but it's not
- // needed anywhere.
- DISALLOW_COPY_AND_ASSIGN(DeferredLabel);
+ if (target.state_ == Label::BOUND) {
+ result.payload.pc = target.bound_index_;
+ } else {
+ DCHECK_EQ(target.state_, Label::UNBOUND);
+ int new_list_begin = code_.length();
+ DCHECK_GE(new_list_begin, 0);
+
+ result.payload.pc = target.unbound_patch_list_begin_;
+
+ target.unbound_patch_list_begin_ = new_list_begin;
+ }
+
+ code_.Add(result, zone_);
+ }
+
+ Zone* zone_;
+ ZoneList<RegExpInstruction> code_;
};
class CompileVisitor : private RegExpVisitor {
@@ -278,27 +304,24 @@ class CompileVisitor : private RegExpVisitor {
// The match is not anchored, i.e. may start at any input position, so we
// emit a preamble corresponding to /.*?/. This skips an arbitrary
// prefix in the input non-greedily.
- compiler.CompileNonGreedyStar([&]() {
- compiler.code_.Add(RegExpInstruction::ConsumeAnyChar(), zone);
- });
+ compiler.CompileNonGreedyStar(
+ [&]() { compiler.assembler_.ConsumeAnyChar(); });
}
- compiler.code_.Add(RegExpInstruction::SetRegisterToCp(0), zone);
+ compiler.assembler_.SetRegisterToCp(0);
tree->Accept(&compiler, nullptr);
- compiler.code_.Add(RegExpInstruction::SetRegisterToCp(1), zone);
- compiler.code_.Add(RegExpInstruction::Accept(), zone);
+ compiler.assembler_.SetRegisterToCp(1);
+ compiler.assembler_.Accept();
- return std::move(compiler.code_);
+ return std::move(compiler.assembler_).IntoCode();
}
private:
- // TODO(mbid,v8:10765): Use some upper bound for code_ capacity computed from
- // the `tree` size we're going to compile?
- explicit CompileVisitor(Zone* zone) : zone_(zone), code_(0, zone) {}
+ explicit CompileVisitor(Zone* zone) : zone_(zone), assembler_(zone) {}
// Generate a disjunction of code fragments compiled by a function `alt_gen`.
// `alt_gen` is called repeatedly with argument `int i = 0, 1, ..., alt_num -
- // 1` and should push code corresponding to the ith alternative onto `code_`.
+ // 1` and should build code corresponding to the ith alternative.
template <class F>
void CompileDisjunction(int alt_num, F&& gen_alt) {
// An alternative a1 | ... | an is compiled into
@@ -325,23 +348,23 @@ class CompileVisitor : private RegExpVisitor {
if (alt_num == 0) {
// The empty disjunction. This can never match.
- code_.Add(RegExpInstruction::Fail(), zone_);
+ assembler_.Fail();
return;
}
- DeferredLabel end;
+ Label end;
for (int i = 0; i != alt_num - 1; ++i) {
- DeferredLabel tail;
- AddForkTo(tail, code_, zone_);
+ Label tail;
+ assembler_.Fork(tail);
gen_alt(i);
- AddJmpTo(end, code_, zone_);
- std::move(tail).Bind(code_);
+ assembler_.Jmp(end);
+ assembler_.Bind(tail);
}
gen_alt(alt_num - 1);
- std::move(end).Bind(code_);
+ assembler_.Bind(end);
}
void* VisitDisjunction(RegExpDisjunction* node, void*) override {
@@ -359,7 +382,7 @@ class CompileVisitor : private RegExpVisitor {
}
void* VisitAssertion(RegExpAssertion* node, void*) override {
- code_.Add(RegExpInstruction::Assertion(node->assertion_type()), zone_);
+ assembler_.Assertion(node->assertion_type());
return nullptr;
}
@@ -390,17 +413,14 @@ class CompileVisitor : private RegExpVisitor {
DCHECK_IMPLIES(to > kMaxSupportedCodepoint, to == String::kMaxCodePoint);
uc16 to_uc16 = static_cast<uc16>(std::min(to, kMaxSupportedCodepoint));
- RegExpInstruction::Uc16Range range{from_uc16, to_uc16};
- code_.Add(RegExpInstruction::ConsumeRange(range), zone_);
+ assembler_.ConsumeRange(from_uc16, to_uc16);
});
return nullptr;
}
void* VisitAtom(RegExpAtom* node, void*) override {
for (uc16 c : node->data()) {
- code_.Add(
- RegExpInstruction::ConsumeRange(RegExpInstruction::Uc16Range{c, c}),
- zone_);
+ assembler_.ConsumeRange(c, c);
}
return nullptr;
}
@@ -413,7 +433,7 @@ class CompileVisitor : private RegExpVisitor {
// It suffices to clear the register containing the `begin` of a capture
// because this indicates that the capture is undefined, regardless of
// the value in the `end` register.
- code_.Add(RegExpInstruction::ClearRegister(i), zone_);
+ assembler_.ClearRegister(i);
}
}
@@ -431,14 +451,15 @@ class CompileVisitor : private RegExpVisitor {
//
// This is greedy because a forked thread has lower priority than the
// thread that spawned it.
- Label begin(code_.length());
- DeferredLabel end;
+ Label begin;
+ Label end;
- AddForkTo(end, code_, zone_);
+ assembler_.Bind(begin);
+ assembler_.Fork(end);
emit_body();
- AddJmpTo(begin, code_, zone_);
+ assembler_.Jmp(begin);
- std::move(end).Bind(code_);
+ assembler_.Bind(end);
}
// Emit bytecode corresponding to /<emit_body>*?/.
@@ -454,18 +475,17 @@ class CompileVisitor : private RegExpVisitor {
// end:
// ...
- Label body(code_.length() + 2);
- DeferredLabel end;
-
- AddForkTo(body, code_, zone_);
- AddJmpTo(end, code_, zone_);
+ Label body;
+ Label end;
- DCHECK_EQ(body.index(), code_.length());
+ assembler_.Fork(body);
+ assembler_.Jmp(end);
+ assembler_.Bind(body);
emit_body();
- AddForkTo(body, code_, zone_);
+ assembler_.Fork(body);
- std::move(end).Bind(code_);
+ assembler_.Bind(end);
}
// Emit bytecode corresponding to /<emit_body>{0, max_repetition_num}/.
@@ -484,12 +504,12 @@ class CompileVisitor : private RegExpVisitor {
// end:
// ...
- DeferredLabel end;
+ Label end;
for (int i = 0; i != max_repetition_num; ++i) {
- AddForkTo(end, code_, zone_);
+ assembler_.Fork(end);
emit_body();
}
- std::move(end).Bind(code_);
+ assembler_.Bind(end);
}
// Emit bytecode corresponding to /<emit_body>{0, max_repetition_num}?/.
@@ -512,17 +532,16 @@ class CompileVisitor : private RegExpVisitor {
// end:
// ...
- DeferredLabel end;
+ Label end;
for (int i = 0; i != max_repetition_num; ++i) {
- Label body(code_.length() + 2);
- AddForkTo(body, code_, zone_);
- AddJmpTo(end, code_, zone_);
-
- DCHECK_EQ(body.index(), code_.length());
+ Label body;
+ assembler_.Fork(body);
+ assembler_.Jmp(end);
+ assembler_.Bind(body);
emit_body();
}
- std::move(end).Bind(code_);
+ assembler_.Bind(end);
}
void* VisitQuantifier(RegExpQuantifier* node, void*) override {
@@ -571,9 +590,9 @@ class CompileVisitor : private RegExpVisitor {
int index = node->index();
int start_register = RegExpCapture::StartRegister(index);
int end_register = RegExpCapture::EndRegister(index);
- code_.Add(RegExpInstruction::SetRegisterToCp(start_register), zone_);
+ assembler_.SetRegisterToCp(start_register);
node->body()->Accept(this, nullptr);
- code_.Add(RegExpInstruction::SetRegisterToCp(end_register), zone_);
+ assembler_.SetRegisterToCp(end_register);
return nullptr;
}
@@ -602,7 +621,7 @@ class CompileVisitor : private RegExpVisitor {
private:
Zone* zone_;
- ZoneList<RegExpInstruction> code_;
+ BytecodeAssembler assembler_;
};
} // namespace
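Editor's note: the compiler rewrite replaces index-based Label/DeferredLabel bookkeeping with a BytecodeAssembler whose Labels record forward references and patch them when Bind is called. Following the shape of CompileDisjunction above, a two-way alternation would be assembled roughly as below; the snippet uses only calls declared in the hunks and assumes the anonymous-namespace types from experimental-compiler.cc, so it is illustrative rather than compilable on its own.

// Editor's sketch (not part of the diff): assembling /a|b/ with the new
// BytecodeAssembler. Fork/Jmp thread unresolved targets through the Label and
// Bind() patches them, so no manual code_.length() arithmetic is needed.
ZoneList<RegExpInstruction> AssembleAlternation(Zone* zone) {
  BytecodeAssembler assembler(zone);
  Label tail;
  Label end;
  assembler.Fork(tail);              // forked thread tries the 2nd alternative
  assembler.ConsumeRange('a', 'a');  // first alternative
  assembler.Jmp(end);
  assembler.Bind(tail);
  assembler.ConsumeRange('b', 'b');  // second alternative
  assembler.Bind(end);
  return std::move(assembler).IntoCode();
}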
diff --git a/deps/v8/src/regexp/experimental/experimental-interpreter.cc b/deps/v8/src/regexp/experimental/experimental-interpreter.cc
index 8db93ca746..fffca782fe 100644
--- a/deps/v8/src/regexp/experimental/experimental-interpreter.cc
+++ b/deps/v8/src/regexp/experimental/experimental-interpreter.cc
@@ -5,6 +5,8 @@
#include "src/regexp/experimental/experimental-interpreter.h"
#include "src/base/optional.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/string-inl.h"
#include "src/regexp/experimental/experimental.h"
#include "src/strings/char-predicates-inl.h"
#include "src/zone/zone-allocator.h"
@@ -50,6 +52,37 @@ bool SatisfiesAssertion(RegExpAssertion::AssertionType type,
}
}
+Vector<RegExpInstruction> ToInstructionVector(
+ ByteArray raw_bytes, const DisallowHeapAllocation& no_gc) {
+ RegExpInstruction* inst_begin =
+ reinterpret_cast<RegExpInstruction*>(raw_bytes.GetDataStartAddress());
+ int inst_num = raw_bytes.length() / sizeof(RegExpInstruction);
+ DCHECK_EQ(sizeof(RegExpInstruction) * inst_num, raw_bytes.length());
+ return Vector<RegExpInstruction>(inst_begin, inst_num);
+}
+
+template <class Character>
+Vector<const Character> ToCharacterVector(String str,
+ const DisallowHeapAllocation& no_gc);
+
+template <>
+Vector<const uint8_t> ToCharacterVector<uint8_t>(
+ String str, const DisallowHeapAllocation& no_gc) {
+ DCHECK(str.IsFlat());
+ String::FlatContent content = str.GetFlatContent(no_gc);
+ DCHECK(content.IsOneByte());
+ return content.ToOneByteVector();
+}
+
+template <>
+Vector<const uc16> ToCharacterVector<uc16>(
+ String str, const DisallowHeapAllocation& no_gc) {
+ DCHECK(str.IsFlat());
+ String::FlatContent content = str.GetFlatContent(no_gc);
+ DCHECK(content.IsTwoByte());
+ return content.ToUC16Vector();
+}
+
template <class Character>
class NfaInterpreter {
// Executes a bytecode program in breadth-first mode, without backtracking.
@@ -100,12 +133,16 @@ class NfaInterpreter {
// with high priority are left, we return the match that was produced by the
// ACCEPTing thread with highest priority.
public:
- NfaInterpreter(Vector<const RegExpInstruction> bytecode,
- int register_count_per_match, Vector<const Character> input,
+ NfaInterpreter(Isolate* isolate, RegExp::CallOrigin call_origin,
+ ByteArray bytecode, int register_count_per_match, String input,
int32_t input_index, Zone* zone)
- : bytecode_(bytecode),
+ : isolate_(isolate),
+ call_origin_(call_origin),
+ bytecode_object_(bytecode),
+ bytecode_(ToInstructionVector(bytecode, no_gc_)),
register_count_per_match_(register_count_per_match),
- input_(input),
+ input_object_(input),
+ input_(ToCharacterVector<Character>(input, no_gc_)),
input_index_(input_index),
pc_last_input_index_(zone->NewArray<int>(bytecode.length()),
bytecode.length()),
@@ -131,12 +168,15 @@ class NfaInterpreter {
int match_num = 0;
while (match_num != max_match_num) {
- FindNextMatch();
+ int err_code = FindNextMatch();
+ if (err_code != RegExp::kInternalRegExpSuccess) return err_code;
+
if (!FoundMatch()) break;
- Vector<int> registers = *best_match_registers_;
+ Vector<int> registers = *best_match_registers_;
output_registers =
std::copy(registers.begin(), registers.end(), output_registers);
+
++match_num;
const int match_begin = registers[0];
@@ -177,6 +217,69 @@ class NfaInterpreter {
int* register_array_begin;
};
+ // Handles pending interrupts if there are any. Returns
+ // RegExp::kInternalRegExpSuccess if execution can continue, and an error
+ // code otherwise.
+ int HandleInterrupts() {
+ StackLimitCheck check(isolate_);
+ if (call_origin_ == RegExp::CallOrigin::kFromJs) {
+ // Direct calls from JavaScript can be interrupted in two ways:
+ // 1. A real stack overflow, in which case we let the caller throw the
+ // exception.
+ // 2. The stack guard was used to interrupt execution for another purpose,
+ // forcing the call through the runtime system.
+ if (check.JsHasOverflowed()) {
+ return RegExp::kInternalRegExpException;
+ } else if (check.InterruptRequested()) {
+ return RegExp::kInternalRegExpRetry;
+ }
+ } else {
+ DCHECK(call_origin_ == RegExp::CallOrigin::kFromRuntime);
+ HandleScope handles(isolate_);
+ Handle<ByteArray> bytecode_handle(bytecode_object_, isolate_);
+ Handle<String> input_handle(input_object_, isolate_);
+
+ if (check.JsHasOverflowed()) {
+ // We abort the interpreter now anyway, so gc can't invalidate any
+ // pointers.
+ AllowHeapAllocation yes_gc;
+ isolate_->StackOverflow();
+ return RegExp::kInternalRegExpException;
+ } else if (check.InterruptRequested()) {
+ // TODO(mbid): Is this really equivalent to whether the string is
+ // one-byte or two-byte? A comment at the declaration of
+ // IsOneByteRepresentationUnderneath says that this might fail for
+ // external strings.
+ const bool was_one_byte =
+ String::IsOneByteRepresentationUnderneath(input_object_);
+
+ Object result;
+ {
+ AllowHeapAllocation yes_gc;
+ result = isolate_->stack_guard()->HandleInterrupts();
+ }
+ if (result.IsException(isolate_)) {
+ return RegExp::kInternalRegExpException;
+ }
+
+ // If we changed between a LATIN1 and a UC16 string, we need to restart
+ // regexp matching with the appropriate template instantiation of
+ // RawMatch.
+ if (String::IsOneByteRepresentationUnderneath(*input_handle) !=
+ was_one_byte) {
+ return RegExp::kInternalRegExpRetry;
+ }
+
+ // Update objects and pointers in case they have changed during gc.
+ bytecode_object_ = *bytecode_handle;
+ bytecode_ = ToInstructionVector(bytecode_object_, no_gc_);
+ input_object_ = *input_handle;
+ input_ = ToCharacterVector<Character>(input_object_, no_gc_);
+ }
+ }
+ return RegExp::kInternalRegExpSuccess;
+ }
+
// Change the current input index for future calls to `FindNextMatch`.
void SetInputIndex(int new_input_index) {
DCHECK_GE(input_index_, 0);
@@ -187,8 +290,10 @@ class NfaInterpreter {
// Find the next match and return the corresponding capture registers and
// write its capture registers to `best_match_registers_`. The search starts
- // at the current `input_index_`.
- void FindNextMatch() {
+ // at the current `input_index_`. Returns RegExp::kInternalRegExpSuccess if
+ // execution could finish regularly (with or without a match) and an error
+ // code due to interrupt otherwise.
+ int FindNextMatch() {
DCHECK(active_threads_.is_empty());
// TODO(mbid,v8:10765): Can we get around resetting `pc_last_input_index_`
// here? As long as
@@ -240,12 +345,20 @@ class NfaInterpreter {
uc16 input_char = input_[input_index_];
++input_index_;
+ static constexpr int kTicksBetweenInterruptHandling = 64;
+ if (input_index_ % kTicksBetweenInterruptHandling == 0) {
+ int err_code = HandleInterrupts();
+ if (err_code != RegExp::kInternalRegExpSuccess) return err_code;
+ }
+
// We unblock all blocked_threads_ by feeding them the input char.
FlushBlockedThreads(input_char);
// Run all threads until they block or accept.
RunActiveThreads();
}
+
+ return RegExp::kInternalRegExpSuccess;
}
// Run an active thread `t` until it executes a CONSUME_RANGE or ACCEPT
@@ -394,12 +507,20 @@ class NfaInterpreter {
pc_last_input_index_[pc] = input_index_;
}
- const Vector<const RegExpInstruction> bytecode_;
+ Isolate* const isolate_;
+
+ const RegExp::CallOrigin call_origin_;
+
+ const DisallowHeapAllocation no_gc_;
+
+ ByteArray bytecode_object_;
+ Vector<const RegExpInstruction> bytecode_;
// Number of registers used per thread.
const int register_count_per_match_;
- const Vector<const Character> input_;
+ String input_object_;
+ Vector<const Character> input_;
int input_index_;
// pc_last_input_index_[k] records the value of input_index_ the last
@@ -432,22 +553,25 @@ class NfaInterpreter {
} // namespace
-int ExperimentalRegExpInterpreter::FindMatchesNfaOneByte(
- Vector<const RegExpInstruction> bytecode, int register_count_per_match,
- Vector<const uint8_t> input, int start_index, int32_t* output_registers,
- int output_register_count, Zone* zone) {
- NfaInterpreter<uint8_t> interpreter(bytecode, register_count_per_match, input,
- start_index, zone);
- return interpreter.FindMatches(output_registers, output_register_count);
-}
-
-int ExperimentalRegExpInterpreter::FindMatchesNfaTwoByte(
- Vector<const RegExpInstruction> bytecode, int register_count_per_match,
- Vector<const uc16> input, int start_index, int32_t* output_registers,
- int output_register_count, Zone* zone) {
- NfaInterpreter<uc16> interpreter(bytecode, register_count_per_match, input,
- start_index, zone);
- return interpreter.FindMatches(output_registers, output_register_count);
+int ExperimentalRegExpInterpreter::FindMatches(
+ Isolate* isolate, RegExp::CallOrigin call_origin, ByteArray bytecode,
+ int register_count_per_match, String input, int start_index,
+ int32_t* output_registers, int output_register_count, Zone* zone) {
+ DCHECK(input.IsFlat());
+ DisallowHeapAllocation no_gc;
+
+ if (input.GetFlatContent(no_gc).IsOneByte()) {
+ NfaInterpreter<uint8_t> interpreter(isolate, call_origin, bytecode,
+ register_count_per_match, input,
+ start_index, zone);
+ return interpreter.FindMatches(output_registers, output_register_count);
+ } else {
+ DCHECK(input.GetFlatContent(no_gc).IsTwoByte());
+ NfaInterpreter<uc16> interpreter(isolate, call_origin, bytecode,
+ register_count_per_match, input,
+ start_index, zone);
+ return interpreter.FindMatches(output_registers, output_register_count);
+ }
}
} // namespace internal
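Editor's note: FindMatches now folds the one-byte and two-byte entry points into a single function and, via HandleInterrupts, checks the stack guard every 64 consumed characters so long-running matches stay interruptible; runtime callers retry on kInternalRegExpRetry, as ExecRawImpl in experimental.cc shows further down. The control-flow pattern, reduced to standalone C++ with local stand-in status codes:

// Editor's sketch (not part of the diff): periodic interrupt checks inside a
// long-running loop, with the status code bubbled up to the caller. Real work
// and the stack guard are stubbed out.
#include <cstddef>

enum Status { kSuccess, kException, kRetry };

Status HandleInterrupts() { return kSuccess; }  // stub for the stack guard

Status RunWithPeriodicInterruptChecks(std::size_t steps) {
  constexpr std::size_t kTicksBetweenInterruptHandling = 64;
  for (std::size_t i = 1; i <= steps; ++i) {
    // ... consume one input character / unblock threads here ...
    if (i % kTicksBetweenInterruptHandling == 0) {
      Status status = HandleInterrupts();
      if (status != kSuccess) return status;  // propagate error or retry
    }
  }
  return kSuccess;
}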
diff --git a/deps/v8/src/regexp/experimental/experimental-interpreter.h b/deps/v8/src/regexp/experimental/experimental-interpreter.h
index 32bff001b1..3da50e3902 100644
--- a/deps/v8/src/regexp/experimental/experimental-interpreter.h
+++ b/deps/v8/src/regexp/experimental/experimental-interpreter.h
@@ -5,7 +5,10 @@
#ifndef V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_INTERPRETER_H_
#define V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_INTERPRETER_H_
+#include "src/objects/fixed-array.h"
+#include "src/objects/string.h"
#include "src/regexp/experimental/experimental-bytecode.h"
+#include "src/regexp/regexp.h"
#include "src/utils/vector.h"
namespace v8 {
@@ -18,18 +21,13 @@ class ExperimentalRegExpInterpreter final : public AllStatic {
// Executes a bytecode program in breadth-first NFA mode, without
// backtracking, to find matching substrings. Tries to find up to
// `max_match_num` matches in `input`, starting at `start_index`. Returns
- // the actual number of matches found. The boundaires of matching subranges
+ // the actual number of matches found. The boundaries of matching subranges
// are written to `matches_out`. Provided in variants for one-byte and
// two-byte strings.
- static int FindMatchesNfaOneByte(Vector<const RegExpInstruction> bytecode,
- int capture_count,
- Vector<const uint8_t> input, int start_index,
- int32_t* output_registers,
- int output_register_count, Zone* zone);
- static int FindMatchesNfaTwoByte(Vector<const RegExpInstruction> bytecode,
- int capture_count, Vector<const uc16> input,
- int start_index, int32_t* output_registers,
- int output_register_count, Zone* zone);
+ static int FindMatches(Isolate* isolate, RegExp::CallOrigin call_origin,
+ ByteArray bytecode, int capture_count, String input,
+ int start_index, int32_t* output_registers,
+ int output_register_count, Zone* zone);
};
} // namespace internal
diff --git a/deps/v8/src/regexp/experimental/experimental.cc b/deps/v8/src/regexp/experimental/experimental.cc
index dc919f56c2..56c0596bb4 100644
--- a/deps/v8/src/regexp/experimental/experimental.cc
+++ b/deps/v8/src/regexp/experimental/experimental.cc
@@ -15,6 +15,8 @@ namespace internal {
bool ExperimentalRegExp::CanBeHandled(RegExpTree* tree, JSRegExp::Flags flags,
int capture_count) {
+ DCHECK(FLAG_enable_experimental_regexp_engine ||
+ FLAG_enable_experimental_regexp_engine_on_excessive_backtracks);
return ExperimentalRegExpCompiler::CanBeHandled(tree, flags, capture_count);
}
@@ -33,7 +35,6 @@ void ExperimentalRegExp::Initialize(Isolate* isolate, Handle<JSRegExp> re,
bool ExperimentalRegExp::IsCompiled(Handle<JSRegExp> re, Isolate* isolate) {
DCHECK(FLAG_enable_experimental_regexp_engine);
-
DCHECK_EQ(re->TypeTag(), JSRegExp::EXPERIMENTAL);
#ifdef VERIFY_HEAP
re->JSRegExpVerify(isolate);
@@ -43,22 +44,34 @@ bool ExperimentalRegExp::IsCompiled(Handle<JSRegExp> re, Isolate* isolate) {
Smi::FromInt(JSRegExp::kUninitializedValue);
}
-bool ExperimentalRegExp::Compile(Isolate* isolate, Handle<JSRegExp> re) {
- DCHECK_EQ(re->TypeTag(), JSRegExp::EXPERIMENTAL);
-#ifdef VERIFY_HEAP
- re->JSRegExpVerify(isolate);
-#endif
+template <class T>
+Handle<ByteArray> VectorToByteArray(Isolate* isolate, Vector<T> data) {
+ STATIC_ASSERT(std::is_trivial<T>::value);
- Handle<String> source(re->Pattern(), isolate);
- if (FLAG_trace_experimental_regexp_engine) {
- StdoutStream{} << "Compiling experimental regexp " << *source << std::endl;
- }
+ int byte_length = sizeof(T) * data.length();
+ Handle<ByteArray> byte_array = isolate->factory()->NewByteArray(byte_length);
+ DisallowHeapAllocation no_gc;
+ MemCopy(byte_array->GetDataStartAddress(), data.begin(), byte_length);
+ return byte_array;
+}
+namespace {
+
+struct CompilationResult {
+ Handle<ByteArray> bytecode;
+ Handle<FixedArray> capture_name_map;
+};
+
+// Compiles source pattern, but doesn't change the regexp object.
+base::Optional<CompilationResult> CompileImpl(Isolate* isolate,
+ Handle<JSRegExp> regexp) {
Zone zone(isolate->allocator(), ZONE_NAME);
+ Handle<String> source(regexp->Pattern(), isolate);
+ JSRegExp::Flags flags = regexp->GetFlags();
+
// Parse and compile the regexp source.
RegExpCompileData parse_result;
- JSRegExp::Flags flags = re->GetFlags();
FlatStringReader reader(isolate, source);
DCHECK(!isolate->has_pending_exception());
@@ -67,28 +80,52 @@ bool ExperimentalRegExp::Compile(Isolate* isolate, Handle<JSRegExp> re) {
if (!parse_success) {
// The pattern was already parsed successfully during initialization, so
// the only way parsing can fail now is because of stack overflow.
- CHECK_EQ(parse_result.error, RegExpError::kStackOverflow);
- USE(RegExp::ThrowRegExpException(isolate, re, source, parse_result.error));
- return false;
+ DCHECK_EQ(parse_result.error, RegExpError::kStackOverflow);
+ USE(RegExp::ThrowRegExpException(isolate, regexp, source,
+ parse_result.error));
+ return base::nullopt;
}
ZoneList<RegExpInstruction> bytecode =
ExperimentalRegExpCompiler::Compile(parse_result.tree, flags, &zone);
- int byte_length = sizeof(RegExpInstruction) * bytecode.length();
- Handle<ByteArray> bytecode_byte_array =
- isolate->factory()->NewByteArray(byte_length);
- MemCopy(bytecode_byte_array->GetDataStartAddress(), bytecode.begin(),
- byte_length);
+ CompilationResult result;
+ result.bytecode = VectorToByteArray(isolate, bytecode.ToVector());
+ result.capture_name_map = parse_result.capture_name_map;
+ return result;
+}
+
+} // namespace
- re->SetDataAt(JSRegExp::kIrregexpLatin1BytecodeIndex, *bytecode_byte_array);
- re->SetDataAt(JSRegExp::kIrregexpUC16BytecodeIndex, *bytecode_byte_array);
+bool ExperimentalRegExp::Compile(Isolate* isolate, Handle<JSRegExp> re) {
+ DCHECK(FLAG_enable_experimental_regexp_engine);
+ DCHECK_EQ(re->TypeTag(), JSRegExp::EXPERIMENTAL);
+#ifdef VERIFY_HEAP
+ re->JSRegExpVerify(isolate);
+#endif
+
+ Handle<String> source(re->Pattern(), isolate);
+ if (FLAG_trace_experimental_regexp_engine) {
+ StdoutStream{} << "Compiling experimental regexp " << *source << std::endl;
+ }
+
+ base::Optional<CompilationResult> compilation_result =
+ CompileImpl(isolate, re);
+ if (!compilation_result.has_value()) {
+ DCHECK(isolate->has_pending_exception());
+ return false;
+ }
+
+ re->SetDataAt(JSRegExp::kIrregexpLatin1BytecodeIndex,
+ *compilation_result->bytecode);
+ re->SetDataAt(JSRegExp::kIrregexpUC16BytecodeIndex,
+ *compilation_result->bytecode);
Handle<Code> trampoline = BUILTIN_CODE(isolate, RegExpExperimentalTrampoline);
re->SetDataAt(JSRegExp::kIrregexpLatin1CodeIndex, *trampoline);
re->SetDataAt(JSRegExp::kIrregexpUC16CodeIndex, *trampoline);
- re->SetCaptureNameMap(parse_result.capture_name_map);
+ re->SetCaptureNameMap(compilation_result->capture_name_map);
return true;
}
@@ -101,45 +138,52 @@ Vector<RegExpInstruction> AsInstructionSequence(ByteArray raw_bytes) {
return Vector<RegExpInstruction>(inst_begin, inst_num);
}
+namespace {
+
+int32_t ExecRawImpl(Isolate* isolate, RegExp::CallOrigin call_origin,
+ ByteArray bytecode, String subject, int capture_count,
+ int32_t* output_registers, int32_t output_register_count,
+ int32_t subject_index) {
+ DisallowHeapAllocation no_gc;
+
+ int register_count_per_match =
+ JSRegExp::RegistersForCaptureCount(capture_count);
+
+ int32_t result;
+ do {
+ DCHECK(subject.IsFlat());
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ result = ExperimentalRegExpInterpreter::FindMatches(
+ isolate, call_origin, bytecode, register_count_per_match, subject,
+ subject_index, output_registers, output_register_count, &zone);
+ } while (result == RegExp::kInternalRegExpRetry &&
+ call_origin == RegExp::kFromRuntime);
+ return result;
+}
+
+} // namespace
+
// Returns the number of matches.
-int32_t ExperimentalRegExp::ExecRaw(Isolate* isolate, JSRegExp regexp,
- String subject, int32_t* output_registers,
+int32_t ExperimentalRegExp::ExecRaw(Isolate* isolate,
+ RegExp::CallOrigin call_origin,
+ JSRegExp regexp, String subject,
+ int32_t* output_registers,
int32_t output_register_count,
int32_t subject_index) {
- DisallowHeapAllocation no_gc;
-
DCHECK(FLAG_enable_experimental_regexp_engine);
+ DisallowHeapAllocation no_gc;
if (FLAG_trace_experimental_regexp_engine) {
String source = String::cast(regexp.DataAt(JSRegExp::kSourceIndex));
StdoutStream{} << "Executing experimental regexp " << source << std::endl;
}
- Vector<RegExpInstruction> bytecode = AsInstructionSequence(
- ByteArray::cast(regexp.DataAt(JSRegExp::kIrregexpLatin1BytecodeIndex)));
-
- if (FLAG_print_regexp_bytecode) {
- StdoutStream{} << "Bytecode:" << std::endl;
- StdoutStream{} << bytecode << std::endl;
- }
-
- int register_count_per_match =
- JSRegExp::RegistersForCaptureCount(regexp.CaptureCount());
-
- DCHECK(subject.IsFlat());
- String::FlatContent subject_content = subject.GetFlatContent(no_gc);
+ ByteArray bytecode =
+ ByteArray::cast(regexp.DataAt(JSRegExp::kIrregexpLatin1BytecodeIndex));
- Zone zone(isolate->allocator(), ZONE_NAME);
-
- if (subject_content.IsOneByte()) {
- return ExperimentalRegExpInterpreter::FindMatchesNfaOneByte(
- bytecode, register_count_per_match, subject_content.ToOneByteVector(),
- subject_index, output_registers, output_register_count, &zone);
- } else {
- return ExperimentalRegExpInterpreter::FindMatchesNfaTwoByte(
- bytecode, register_count_per_match, subject_content.ToUC16Vector(),
- subject_index, output_registers, output_register_count, &zone);
- }
+ return ExecRawImpl(isolate, call_origin, bytecode, subject,
+ regexp.CaptureCount(), output_registers,
+ output_register_count, subject_index);
}
int32_t ExperimentalRegExp::MatchForCallFromJs(
@@ -148,7 +192,6 @@ int32_t ExperimentalRegExp::MatchForCallFromJs(
Address backtrack_stack, RegExp::CallOrigin call_origin, Isolate* isolate,
Address regexp) {
DCHECK(FLAG_enable_experimental_regexp_engine);
-
DCHECK_NOT_NULL(isolate);
DCHECK_NOT_NULL(output_registers);
DCHECK(call_origin == RegExp::CallOrigin::kFromJs);
@@ -162,15 +205,14 @@ int32_t ExperimentalRegExp::MatchForCallFromJs(
JSRegExp regexp_obj = JSRegExp::cast(Object(regexp));
- return ExecRaw(isolate, regexp_obj, subject_string, output_registers,
- output_register_count, start_position);
+ return ExecRaw(isolate, RegExp::kFromJs, regexp_obj, subject_string,
+ output_registers, output_register_count, start_position);
}
MaybeHandle<Object> ExperimentalRegExp::Exec(
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
int subject_index, Handle<RegExpMatchInfo> last_match_info) {
DCHECK(FLAG_enable_experimental_regexp_engine);
-
DCHECK_EQ(regexp->TypeTag(), JSRegExp::EXPERIMENTAL);
#ifdef VERIFY_HEAP
regexp->JSRegExpVerify(isolate);
@@ -197,16 +239,78 @@ MaybeHandle<Object> ExperimentalRegExp::Exec(
output_registers_release.reset(output_registers);
}
- int num_matches = ExecRaw(isolate, *regexp, *subject, output_registers,
- output_register_count, subject_index);
+ int num_matches =
+ ExecRaw(isolate, RegExp::kFromRuntime, *regexp, *subject,
+ output_registers, output_register_count, subject_index);
- if (num_matches == 0) {
+ if (num_matches > 0) {
+ DCHECK_EQ(num_matches, 1);
+ return RegExp::SetLastMatchInfo(isolate, last_match_info, subject,
+ capture_count, output_registers);
+ } else if (num_matches == 0) {
return isolate->factory()->null_value();
} else {
+ DCHECK_LT(num_matches, 0);
+ DCHECK(isolate->has_pending_exception());
+ return MaybeHandle<Object>();
+ }
+}
+
+int32_t ExperimentalRegExp::OneshotExecRaw(Isolate* isolate,
+ Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int32_t* output_registers,
+ int32_t output_register_count,
+ int32_t subject_index) {
+ DCHECK(FLAG_enable_experimental_regexp_engine_on_excessive_backtracks);
+
+ if (FLAG_trace_experimental_regexp_engine) {
+ StdoutStream{} << "Experimental execution (oneshot) of regexp "
+ << regexp->Pattern() << std::endl;
+ }
+
+ base::Optional<CompilationResult> compilation_result =
+ CompileImpl(isolate, regexp);
+ if (!compilation_result.has_value()) return RegExp::kInternalRegExpException;
+
+ DisallowHeapAllocation no_gc;
+ return ExecRawImpl(isolate, RegExp::kFromRuntime,
+ *compilation_result->bytecode, *subject,
+ regexp->CaptureCount(), output_registers,
+ output_register_count, subject_index);
+}
+
+MaybeHandle<Object> ExperimentalRegExp::OneshotExec(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int subject_index, Handle<RegExpMatchInfo> last_match_info) {
+ DCHECK(FLAG_enable_experimental_regexp_engine_on_excessive_backtracks);
+ DCHECK_NE(regexp->TypeTag(), JSRegExp::NOT_COMPILED);
+
+ int capture_count = regexp->CaptureCount();
+ int output_register_count = JSRegExp::RegistersForCaptureCount(capture_count);
+
+ int32_t* output_registers;
+ std::unique_ptr<int32_t[]> output_registers_release;
+ if (output_register_count <= Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ output_registers = isolate->jsregexp_static_offsets_vector();
+ } else {
+ output_registers = NewArray<int32_t>(output_register_count);
+ output_registers_release.reset(output_registers);
+ }
+
+ int num_matches = OneshotExecRaw(isolate, regexp, subject, output_registers,
+ output_register_count, subject_index);
+
+ if (num_matches > 0) {
DCHECK_EQ(num_matches, 1);
return RegExp::SetLastMatchInfo(isolate, last_match_info, subject,
capture_count, output_registers);
- return last_match_info;
+ } else if (num_matches == 0) {
+ return isolate->factory()->null_value();
+ } else {
+ DCHECK_LT(num_matches, 0);
+ DCHECK(isolate->has_pending_exception());
+ return MaybeHandle<Object>();
}
}
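
Note on the copy above: VectorToByteArray performs a plain byte-wise copy of the compiled instruction buffer into a ByteArray. A standalone sketch of the same copy, with std::vector and a raw byte buffer standing in for V8's Vector and ByteArray (both stand-ins are assumptions, not part of this patch):

  #include <cstddef>
  #include <cstdint>
  #include <cstring>
  #include <vector>

  // Stand-in for RegExpInstruction; only its size matters for the copy.
  struct Instruction { int32_t opcode; int32_t payload; };

  // Mirrors VectorToByteArray: byte_length = sizeof(T) * length, then a MemCopy.
  std::vector<uint8_t> ToBytes(const std::vector<Instruction>& code) {
    const std::size_t byte_length = sizeof(Instruction) * code.size();
    std::vector<uint8_t> bytes(byte_length);
    if (byte_length != 0) std::memcpy(bytes.data(), code.data(), byte_length);
    return bytes;
  }
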
diff --git a/deps/v8/src/regexp/experimental/experimental.h b/deps/v8/src/regexp/experimental/experimental.h
index 02f535f621..a0ee8d1081 100644
--- a/deps/v8/src/regexp/experimental/experimental.h
+++ b/deps/v8/src/regexp/experimental/experimental.h
@@ -39,10 +39,22 @@ class ExperimentalRegExp final : public AllStatic {
static MaybeHandle<Object> Exec(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> subject, int index,
Handle<RegExpMatchInfo> last_match_info);
- static int32_t ExecRaw(Isolate* isolate, JSRegExp regexp, String subject,
+ static int32_t ExecRaw(Isolate* isolate, RegExp::CallOrigin call_origin,
+ JSRegExp regexp, String subject,
int32_t* output_registers,
int32_t output_register_count, int32_t subject_index);
+ // Compile and execute a regexp with the experimental engine, regardless of
+ // its type tag. The regexp itself is not changed (apart from lastIndex).
+ static MaybeHandle<Object> OneshotExec(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int index, Handle<RegExpMatchInfo> last_match_info);
+ static int32_t OneshotExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int32_t* output_registers,
+ int32_t output_register_count,
+ int32_t subject_index);
+
static constexpr bool kSupportsUnicode = false;
};
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 2135e977a7..27c1300ced 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -116,6 +116,7 @@ RegExpMacroAssemblerIA32::~RegExpMacroAssemblerIA32() {
exit_label_.Unuse();
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
+ fallback_label_.Unuse();
}
@@ -148,8 +149,13 @@ void RegExpMacroAssemblerIA32::Backtrack() {
__ cmp(Operand(ebp, kBacktrackCount), Immediate(backtrack_limit()));
__ j(not_equal, &next);
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+    // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -940,6 +946,12 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ jmp(&return_eax);
}
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ mov(eax, FALLBACK_TO_EXPERIMENTAL);
+ __ jmp(&return_eax);
+ }
+
CodeDesc code_desc;
masm_->GetCode(masm_->isolate(), &code_desc);
Handle<Code> code =
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index a30bff29a1..0cb29979d7 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -192,6 +192,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
+ Label fallback_label_;
};
} // namespace internal
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index db79011284..e1b1119c17 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -129,6 +129,7 @@ RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
internal_failure_label_.Unuse();
+ fallback_label_.Unuse();
}
@@ -165,8 +166,13 @@ void RegExpMacroAssemblerMIPS::Backtrack() {
__ Sw(a0, MemOperand(frame_pointer(), kBacktrackCount));
__ Branch(&next, ne, a0, Operand(backtrack_limit()));
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+    // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -910,6 +916,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ li(v0, Operand(EXCEPTION));
__ jmp(&return_v0);
}
+
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ li(v0, Operand(FALLBACK_TO_EXPERIMENTAL));
+ __ jmp(&return_v0);
+ }
}
CodeDesc code_desc;
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index e2aea1b091..dd1c27a7db 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -211,6 +211,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
Label check_preempt_label_;
Label stack_overflow_label_;
Label internal_failure_label_;
+ Label fallback_label_;
};
} // namespace internal
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 309cebfcb9..48252a206e 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -165,6 +165,7 @@ RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
internal_failure_label_.Unuse();
+ fallback_label_.Unuse();
}
@@ -201,8 +202,13 @@ void RegExpMacroAssemblerMIPS::Backtrack() {
__ Sd(a0, MemOperand(frame_pointer(), kBacktrackCount));
__ Branch(&next, ne, a0, Operand(backtrack_limit()));
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+    // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -946,6 +952,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ li(v0, Operand(EXCEPTION));
__ jmp(&return_v0);
}
+
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ li(v0, Operand(FALLBACK_TO_EXPERIMENTAL));
+ __ jmp(&return_v0);
+ }
}
CodeDesc code_desc;
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index aebfec1060..b9a29ca010 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -216,6 +216,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
Label check_preempt_label_;
Label stack_overflow_label_;
Label internal_failure_label_;
+ Label fallback_label_;
};
} // namespace internal
diff --git a/deps/v8/src/regexp/ppc/OWNERS b/deps/v8/src/regexp/ppc/OWNERS
index 6edd45a6ef..02c2cd757c 100644
--- a/deps/v8/src/regexp/ppc/OWNERS
+++ b/deps/v8/src/regexp/ppc/OWNERS
@@ -2,3 +2,4 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
+vasili.skurydzin@ibm.com
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 0b1c9a99b7..c0d69297f9 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -136,6 +136,7 @@ RegExpMacroAssemblerPPC::~RegExpMacroAssemblerPPC() {
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
internal_failure_label_.Unuse();
+ fallback_label_.Unuse();
}
@@ -176,11 +177,17 @@ void RegExpMacroAssemblerPPC::Backtrack() {
__ LoadP(r3, MemOperand(frame_pointer(), kBacktrackCount), r0);
__ addi(r3, r3, Operand(1));
__ StoreP(r3, MemOperand(frame_pointer(), kBacktrackCount), r0);
- __ cmpi(r3, Operand(backtrack_limit()));
+ __ mov(r0, Operand(backtrack_limit()));
+ __ cmp(r3, r0);
__ bne(&next);
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ b(&fallback_label_);
+ } else {
+    // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -952,6 +959,12 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ li(r3, Operand(EXCEPTION));
__ b(&return_r3);
}
+
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ li(r3, Operand(FALLBACK_TO_EXPERIMENTAL));
+ __ b(&return_r3);
+ }
}
CodeDesc code_desc;
@@ -1140,7 +1153,6 @@ void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
__ mov(ip, Operand(stack_guard_check));
EmbeddedData d = EmbeddedData::FromBlob();
- CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
__ mov(r0, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
__ Call(r0);
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index f6b959837f..18b7c5b110 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -197,6 +197,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
Label check_preempt_label_;
Label stack_overflow_label_;
Label internal_failure_label_;
+ Label fallback_label_;
};
// Set of non-volatile registers saved/restored by generated regexp code.
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.cc b/deps/v8/src/regexp/regexp-bytecode-generator.cc
index 8abd15384e..262d788068 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.cc
@@ -132,7 +132,11 @@ void RegExpBytecodeGenerator::PopCurrentPosition() { Emit(BC_POP_CP, 0); }
void RegExpBytecodeGenerator::PushCurrentPosition() { Emit(BC_PUSH_CP, 0); }
-void RegExpBytecodeGenerator::Backtrack() { Emit(BC_POP_BT, 0); }
+void RegExpBytecodeGenerator::Backtrack() {
+ int error_code =
+ can_fallback() ? RegExp::RE_FALLBACK_TO_EXPERIMENTAL : RegExp::RE_FAILURE;
+ Emit(BC_POP_BT, error_code);
+}
void RegExpBytecodeGenerator::GoTo(Label* l) {
if (advance_current_end_ == pc_) {
@@ -368,7 +372,7 @@ void RegExpBytecodeGenerator::IfRegisterEqPos(int register_index,
Handle<HeapObject> RegExpBytecodeGenerator::GetCode(Handle<String> source) {
Bind(&backtrack_);
- Emit(BC_POP_BT, 0);
+ Backtrack();
Handle<ByteArray> array;
if (FLAG_regexp_peephole_optimization) {
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
index ce1197a55b..fe032bcfdd 100644
--- a/deps/v8/src/regexp/regexp-compiler.cc
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -1777,10 +1777,11 @@ class LoopInitializationMarker {
DCHECK(node_->traversed_loop_initialization_node_);
node_->traversed_loop_initialization_node_ = false;
}
+ LoopInitializationMarker(const LoopInitializationMarker&) = delete;
+ LoopInitializationMarker& operator=(const LoopInitializationMarker&) = delete;
private:
LoopChoiceNode* node_;
- DISALLOW_COPY_AND_ASSIGN(LoopInitializationMarker);
};
// Temporarily decrements min_loop_iterations_.
@@ -1791,10 +1792,11 @@ class IterationDecrementer {
--node_->min_loop_iterations_;
}
~IterationDecrementer() { ++node_->min_loop_iterations_; }
+ IterationDecrementer(const IterationDecrementer&) = delete;
+ IterationDecrementer& operator=(const IterationDecrementer&) = delete;
private:
LoopChoiceNode* node_;
- DISALLOW_COPY_AND_ASSIGN(IterationDecrementer);
};
RegExpNode* SeqRegExpNode::FilterOneByte(int depth) {
diff --git a/deps/v8/src/regexp/regexp-error.h b/deps/v8/src/regexp/regexp-error.h
index 6145b404ab..628f93638e 100644
--- a/deps/v8/src/regexp/regexp-error.h
+++ b/deps/v8/src/regexp/regexp-error.h
@@ -30,6 +30,7 @@ namespace internal {
T(InvalidQuantifier, "Invalid quantifier") \
T(InvalidGroup, "Invalid group") \
T(MultipleFlagDashes, "Multiple dashes in flag group") \
+ T(NotLinear, "Cannot be executed in linear time") \
T(RepeatedFlag, "Repeated flag in flag group") \
T(InvalidFlagGroup, "Invalid flag group") \
T(TooManyCaptures, "Too many captures") \
diff --git a/deps/v8/src/regexp/regexp-interpreter.cc b/deps/v8/src/regexp/regexp-interpreter.cc
index 80442a8db6..a73a9d3fcc 100644
--- a/deps/v8/src/regexp/regexp-interpreter.cc
+++ b/deps/v8/src/regexp/regexp-interpreter.cc
@@ -125,6 +125,8 @@ uint32_t LoadPacked24Unsigned(int32_t bytecode_and_packed_arg) {
class BacktrackStack {
public:
BacktrackStack() = default;
+ BacktrackStack(const BacktrackStack&) = delete;
+ BacktrackStack& operator=(const BacktrackStack&) = delete;
V8_WARN_UNUSED_RESULT bool push(int v) {
data_.emplace_back(v);
@@ -157,8 +159,6 @@ class BacktrackStack {
static constexpr int kMaxSize =
RegExpStack::kMaximumStackSize / sizeof(ValueT);
-
- DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
};
// Registers used during interpreter execution. These consist of output
@@ -521,8 +521,8 @@ IrregexpInterpreter::Result RawMatch(
BYTECODE(POP_BT) {
STATIC_ASSERT(JSRegExp::kNoBacktrackLimit == 0);
if (++backtrack_count == backtrack_limit) {
- // Exceeded limits are treated as a failed match.
- return IrregexpInterpreter::FAILURE;
+ int return_code = LoadPacked24Signed(insn);
+ return static_cast<IrregexpInterpreter::Result>(return_code);
}
IrregexpInterpreter::Result return_code =
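
The generator-to-interpreter hand-off above works by packing the desired result code into the BC_POP_BT operand: RegExpBytecodeGenerator::Backtrack() emits RE_FALLBACK_TO_EXPERIMENTAL or RE_FAILURE, and the interpreter returns that value via LoadPacked24Signed once the backtrack limit is hit. The sketch below shows one possible encoding (signed argument in the upper 24 bits, opcode in the low 8); the actual layout is whatever V8's bytecode macros define, so treat the shift amounts and the opcode value as assumptions.

  #include <cassert>
  #include <cstdint>

  // Low 8 bits: opcode; upper 24 bits: signed argument, e.g. -3 for
  // RE_FALLBACK_TO_EXPERIMENTAL or 0 for RE_FAILURE.
  inline int32_t Pack(int32_t bytecode, int32_t arg) {
    return static_cast<int32_t>((static_cast<uint32_t>(arg) << 8) |
                                (static_cast<uint32_t>(bytecode) & 0xFFu));
  }

  inline int32_t UnpackSignedArg(int32_t insn) {
    return insn >> 8;  // Relies on arithmetic right shift for negative values.
  }

  int main() {
    const int32_t kPopBt = 0x2A;  // Hypothetical opcode value.
    assert(UnpackSignedArg(Pack(kPopBt, -3)) == -3);
    assert(UnpackSignedArg(Pack(kPopBt, 0)) == 0);
    return 0;
  }
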
diff --git a/deps/v8/src/regexp/regexp-interpreter.h b/deps/v8/src/regexp/regexp-interpreter.h
index be96476443..9b4a8c6c30 100644
--- a/deps/v8/src/regexp/regexp-interpreter.h
+++ b/deps/v8/src/regexp/regexp-interpreter.h
@@ -19,6 +19,7 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic {
SUCCESS = RegExp::kInternalRegExpSuccess,
EXCEPTION = RegExp::kInternalRegExpException,
RETRY = RegExp::kInternalRegExpRetry,
+ FALLBACK_TO_EXPERIMENTAL = RegExp::kInternalRegExpFallbackToExperimental,
};
// In case a StackOverflow occurs, a StackOverflowException is created and
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index cf4346309e..62a72b1661 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -315,7 +315,7 @@ int NativeRegExpMacroAssembler::Execute(
int result =
fn.Call(input.ptr(), start_offset, input_start, input_end, output,
output_size, stack_base, call_origin, isolate, regexp.ptr());
- DCHECK(result >= RETRY);
+ DCHECK_GE(result, SMALLEST_REGEXP_RESULT);
if (result == EXCEPTION && !isolate->has_pending_exception()) {
// We detected a stack overflow (on the backtrack stack) in RegExp code,
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index 52465610cb..f1dc57db64 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -183,10 +183,19 @@ class RegExpMacroAssembler {
void set_slow_safe(bool ssc) { slow_safe_compiler_ = ssc; }
bool slow_safe() { return slow_safe_compiler_; }
+ // Controls after how many backtracks irregexp should abort execution. If it
+ // can fall back to the experimental engine (see `set_can_fallback`), it will
+  // return the appropriate error code; otherwise it will return the number of
+ // matches found so far (perhaps none).
void set_backtrack_limit(uint32_t backtrack_limit) {
backtrack_limit_ = backtrack_limit;
}
+ // Set whether or not irregexp can fall back to the experimental engine on
+ // excessive backtracking. The number of backtracks considered excessive can
+ // be controlled with set_backtrack_limit.
+ void set_can_fallback(bool val) { can_fallback_ = val; }
+
enum GlobalMode {
NOT_GLOBAL,
GLOBAL_NO_ZERO_LENGTH_CHECK,
@@ -211,9 +220,12 @@ class RegExpMacroAssembler {
}
uint32_t backtrack_limit() const { return backtrack_limit_; }
+ bool can_fallback() const { return can_fallback_; }
+
private:
bool slow_safe_compiler_;
uint32_t backtrack_limit_ = JSRegExp::kNoBacktrackLimit;
+ bool can_fallback_ = false;
GlobalMode global_mode_;
Isolate* isolate_;
Zone* zone_;
@@ -228,16 +240,20 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
// RETRY: Something significant changed during execution, and the matching
// should be retried from scratch.
// EXCEPTION: Something failed during execution. If no exception has been
- // thrown, it's an internal out-of-memory, and the caller should
- // throw the exception.
+ // thrown, it's an internal out-of-memory, and the caller should
+ // throw the exception.
// FAILURE: Matching failed.
// SUCCESS: Matching succeeded, and the output array has been filled with
- // capture positions.
+ // capture positions.
+ // FALLBACK_TO_EXPERIMENTAL: Execute the regexp on this subject using the
+ // experimental engine instead.
enum Result {
FAILURE = RegExp::kInternalRegExpFailure,
SUCCESS = RegExp::kInternalRegExpSuccess,
EXCEPTION = RegExp::kInternalRegExpException,
RETRY = RegExp::kInternalRegExpRetry,
+ FALLBACK_TO_EXPERIMENTAL = RegExp::kInternalRegExpFallbackToExperimental,
+ SMALLEST_REGEXP_RESULT = RegExp::kInternalRegExpSmallestResult,
};
NativeRegExpMacroAssembler(Isolate* isolate, Zone* zone);
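
Taken together, the assembler now exposes two coupled knobs: a numeric backtrack limit and a flag saying whether exceeding it should fall back to the experimental engine rather than fail. A minimal caller-side sketch of wiring them up (the helper name and the bare masm pointer are illustrative; the real call sites are in the regexp.cc hunk further down):

  // Sketch only: assumes an already-constructed RegExpMacroAssembler.
  void ConfigureBacktracking(RegExpMacroAssembler* masm, uint32_t limit,
                             bool experimental_can_handle_pattern) {
    masm->set_backtrack_limit(limit);
    // With can_fallback == false, exceeding the limit behaves like a failed
    // match; with true, the generated code reports FALLBACK_TO_EXPERIMENTAL.
    masm->set_can_fallback(experimental_can_handle_pattern);
  }
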
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index fa58764aaa..622baadc07 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -1829,15 +1829,6 @@ bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
return success;
}
-bool RegExpParser::VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
- FlatStringReader* input,
- JSRegExp::Flags flags,
- RegExpCompileData* result,
- const DisallowHeapAllocation& no_gc) {
- RegExpParser parser(input, flags, isolate, zone);
- return parser.Parse(result, no_gc);
-}
-
RegExpBuilder::RegExpBuilder(Zone* zone, JSRegExp::Flags flags)
: zone_(zone),
pending_empty_(false),
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index 74b653b47e..23afe9f939 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -159,10 +159,6 @@ class V8_EXPORT_PRIVATE RegExpParser {
static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
JSRegExp::Flags flags, RegExpCompileData* result);
- static bool VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
- FlatStringReader* input, JSRegExp::Flags flags,
- RegExpCompileData* result,
- const DisallowHeapAllocation& no_gc);
private:
bool Parse(RegExpCompileData* result, const DisallowHeapAllocation&);
diff --git a/deps/v8/src/regexp/regexp-stack.cc b/deps/v8/src/regexp/regexp-stack.cc
index 7f47aec5ae..9a80f6f211 100644
--- a/deps/v8/src/regexp/regexp-stack.cc
+++ b/deps/v8/src/regexp/regexp-stack.cc
@@ -14,12 +14,18 @@ RegExpStackScope::RegExpStackScope(Isolate* isolate)
: regexp_stack_(isolate->regexp_stack()) {
// Initialize, if not already initialized.
regexp_stack_->EnsureCapacity(0);
+ // Irregexp is not reentrant in several ways; in particular, the
+ // RegExpStackScope is not reentrant since the destructor frees allocated
+ // memory. Protect against reentrancy here.
+ CHECK(!regexp_stack_->is_in_use());
+ regexp_stack_->set_is_in_use(true);
}
RegExpStackScope::~RegExpStackScope() {
// Reset the buffer if it has grown.
regexp_stack_->Reset();
+ DCHECK(!regexp_stack_->is_in_use());
}
RegExpStack::RegExpStack() : thread_local_(this), isolate_(nullptr) {}
@@ -36,17 +42,15 @@ char* RegExpStack::ArchiveStack(char* to) {
DCHECK(thread_local_.owns_memory_);
}
- size_t size = sizeof(thread_local_);
- MemCopy(reinterpret_cast<void*>(to), &thread_local_, size);
+ MemCopy(reinterpret_cast<void*>(to), &thread_local_, kThreadLocalSize);
thread_local_ = ThreadLocal(this);
- return to + size;
+ return to + kThreadLocalSize;
}
char* RegExpStack::RestoreStack(char* from) {
- size_t size = sizeof(thread_local_);
- MemCopy(&thread_local_, reinterpret_cast<void*>(from), size);
- return from + size;
+ MemCopy(&thread_local_, reinterpret_cast<void*>(from), kThreadLocalSize);
+ return from + kThreadLocalSize;
}
void RegExpStack::Reset() { thread_local_.ResetToStaticStack(this); }
@@ -60,6 +64,7 @@ void RegExpStack::ThreadLocal::ResetToStaticStack(RegExpStack* regexp_stack) {
limit_ = reinterpret_cast<Address>(regexp_stack->static_stack_) +
kStackLimitSlack * kSystemPointerSize;
owns_memory_ = false;
+ is_in_use_ = false;
}
void RegExpStack::ThreadLocal::FreeAndInvalidate() {
diff --git a/deps/v8/src/regexp/regexp-stack.h b/deps/v8/src/regexp/regexp-stack.h
index 9394398fcc..25a213e471 100644
--- a/deps/v8/src/regexp/regexp-stack.h
+++ b/deps/v8/src/regexp/regexp-stack.h
@@ -26,13 +26,13 @@ class RegExpStackScope {
// Initializes the stack memory area if necessary.
explicit RegExpStackScope(Isolate* isolate);
~RegExpStackScope(); // Releases the stack if it has grown.
+ RegExpStackScope(const RegExpStackScope&) = delete;
+ RegExpStackScope& operator=(const RegExpStackScope&) = delete;
RegExpStack* stack() const { return regexp_stack_; }
private:
RegExpStack* regexp_stack_;
-
- DISALLOW_COPY_AND_ASSIGN(RegExpStackScope);
};
@@ -40,6 +40,8 @@ class RegExpStack {
public:
RegExpStack();
~RegExpStack();
+ RegExpStack(const RegExpStack&) = delete;
+ RegExpStack& operator=(const RegExpStack&) = delete;
// Number of allocated locations on the stack below the limit.
// No sequence of pushes must be longer that this without doing a stack-limit
@@ -68,9 +70,12 @@ class RegExpStack {
// If passing zero, the default/minimum size buffer is allocated.
Address EnsureCapacity(size_t size);
+ bool is_in_use() const { return thread_local_.is_in_use_; }
+ void set_is_in_use(bool v) { thread_local_.is_in_use_ = v; }
+
// Thread local archiving.
static constexpr int ArchiveSpacePerThread() {
- return static_cast<int>(sizeof(ThreadLocal));
+ return static_cast<int>(kThreadLocalSize);
}
char* ArchiveStack(char* to);
char* RestoreStack(char* from);
@@ -112,10 +117,12 @@ class RegExpStack {
size_t memory_size_ = 0;
Address limit_ = kNullAddress;
bool owns_memory_ = false; // Whether memory_ is owned and must be freed.
+ bool is_in_use_ = false; // To guard against reentrancy.
void ResetToStaticStack(RegExpStack* regexp_stack);
void FreeAndInvalidate();
};
+ static constexpr size_t kThreadLocalSize = sizeof(ThreadLocal);
// Address of top of memory used as stack.
Address memory_top_address_address() {
@@ -133,8 +140,6 @@ class RegExpStack {
friend class ExternalReference;
friend class Isolate;
friend class RegExpStackScope;
-
- DISALLOW_COPY_AND_ASSIGN(RegExpStack);
};
} // namespace internal
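
The is_in_use_ flag added to ThreadLocal above is a plain reentrancy guard: RegExpStackScope asserts the stack is free and marks it busy on entry, and ResetToStaticStack clears the flag when the scope unwinds. A standalone sketch of the same RAII pattern, using assert in place of V8's CHECK (an assumption):

  #include <cassert>

  struct Stack {
    bool in_use = false;
  };

  class StackScope {
   public:
    explicit StackScope(Stack* stack) : stack_(stack) {
      assert(!stack_->in_use);  // Guard against reentrant regexp execution.
      stack_->in_use = true;
    }
    ~StackScope() { stack_->in_use = false; }  // Mirrors ResetToStaticStack().
    StackScope(const StackScope&) = delete;
    StackScope& operator=(const StackScope&) = delete;

   private:
    Stack* stack_;
  };

  int main() {
    Stack stack;
    StackScope outer(&stack);
    // Constructing a second StackScope on `stack` here would trip the assert.
    return 0;
  }
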
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 556edbdac8..07d1b5d8f3 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -173,9 +173,10 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
// with the init order in the bootstrapper).
InternalIndex kExecIndex(JSRegExp::kExecFunctionDescriptorIndex);
DCHECK_EQ(*(isolate->factory()->exec_string()),
- proto_map.instance_descriptors().GetKey(kExecIndex));
- if (proto_map.instance_descriptors().GetDetails(kExecIndex).constness() !=
- PropertyConstness::kConst) {
+ proto_map.instance_descriptors(kRelaxedLoad).GetKey(kExecIndex));
+ if (proto_map.instance_descriptors(kRelaxedLoad)
+ .GetDetails(kExecIndex)
+ .constness() != PropertyConstness::kConst) {
return false;
}
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
index 569acdab48..b62ad1fff8 100644
--- a/deps/v8/src/regexp/regexp.cc
+++ b/deps/v8/src/regexp/regexp.cc
@@ -17,6 +17,7 @@
#include "src/regexp/regexp-macro-assembler-arch.h"
#include "src/regexp/regexp-macro-assembler-tracer.h"
#include "src/regexp/regexp-parser.h"
+#include "src/regexp/regexp-utils.h"
#include "src/strings/string-search.h"
#include "src/utils/ostreams.h"
@@ -88,7 +89,7 @@ class RegExpImpl final : public AllStatic {
static bool Compile(Isolate* isolate, Zone* zone, RegExpCompileData* input,
JSRegExp::Flags flags, Handle<String> pattern,
Handle<String> sample_subject, bool is_one_byte,
- uint32_t backtrack_limit);
+ uint32_t& backtrack_limit);
// For acting on the JSRegExp data FixedArray.
static int IrregexpMaxRegisterCount(FixedArray re);
@@ -119,6 +120,10 @@ void RegExp::ThrowRegExpException(Isolate* isolate, Handle<JSRegExp> re,
error_text));
}
+bool RegExp::IsUnmodifiedRegExp(Isolate* isolate, Handle<JSRegExp> regexp) {
+ return RegExpUtils::IsUnmodifiedRegExp(isolate, regexp);
+}
+
// Identifies the sort of regexps where the regexp engine is faster
// than the code used for atom matches.
static bool HasFewDifferentCharacters(Handle<String> pattern) {
@@ -182,9 +187,22 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
bool has_been_compiled = false;
- if (FLAG_enable_experimental_regexp_engine &&
+ if (FLAG_default_to_experimental_regexp_engine &&
ExperimentalRegExp::CanBeHandled(parse_result.tree, flags,
parse_result.capture_count)) {
+ DCHECK(FLAG_enable_experimental_regexp_engine);
+ ExperimentalRegExp::Initialize(isolate, re, pattern, flags,
+ parse_result.capture_count);
+ has_been_compiled = true;
+ } else if (flags & JSRegExp::kLinear) {
+ DCHECK(FLAG_enable_experimental_regexp_engine);
+ if (!ExperimentalRegExp::CanBeHandled(parse_result.tree, flags,
+ parse_result.capture_count)) {
+      // TODO(mbid): The error could provide a reason why the regexp can't
+ // be executed in linear time (e.g. due to back references).
+ return RegExp::ThrowRegExpException(isolate, re, pattern,
+ RegExpError::kNotLinear);
+ }
ExperimentalRegExp::Initialize(isolate, re, pattern, flags,
parse_result.capture_count);
has_been_compiled = true;
@@ -248,6 +266,14 @@ bool RegExp::EnsureFullyCompiled(Isolate* isolate, Handle<JSRegExp> re,
}
// static
+MaybeHandle<Object> RegExp::ExperimentalOneshotExec(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int index, Handle<RegExpMatchInfo> last_match_info) {
+ return ExperimentalRegExp::OneshotExec(isolate, regexp, subject, index,
+ last_match_info);
+}
+
+// static
MaybeHandle<Object> RegExp::Exec(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> subject, int index,
Handle<RegExpMatchInfo> last_match_info) {
@@ -450,9 +476,10 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
compile_data.compilation_target = re->ShouldProduceBytecode()
? RegExpCompilationTarget::kBytecode
: RegExpCompilationTarget::kNative;
+ uint32_t backtrack_limit = re->BacktrackLimit();
const bool compilation_succeeded =
Compile(isolate, &zone, &compile_data, flags, pattern, sample_subject,
- is_one_byte, re->BacktrackLimit());
+ is_one_byte, backtrack_limit);
if (!compilation_succeeded) {
DCHECK(compile_data.error != RegExpError::kNone);
RegExp::ThrowRegExpException(isolate, re, compile_data.error);
@@ -482,6 +509,7 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
if (compile_data.register_count > register_max) {
SetIrregexpMaxRegisterCount(*data, compile_data.register_count);
}
+ data->set(JSRegExp::kIrregexpBacktrackLimit, Smi::FromInt(backtrack_limit));
if (FLAG_trace_regexp_tier_up) {
PrintF("JSRegExp object %p %s size: %d\n",
@@ -595,6 +623,7 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
case IrregexpInterpreter::SUCCESS:
case IrregexpInterpreter::EXCEPTION:
case IrregexpInterpreter::FAILURE:
+ case IrregexpInterpreter::FALLBACK_TO_EXPERIMENTAL:
return result;
case IrregexpInterpreter::RETRY:
// The string has changed representation, and we must restart the
@@ -665,13 +694,16 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
int capture_count = regexp->CaptureCount();
return RegExp::SetLastMatchInfo(isolate, last_match_info, subject,
capture_count, output_registers);
- }
- if (res == RegExp::RE_EXCEPTION) {
+ } else if (res == RegExp::RE_FALLBACK_TO_EXPERIMENTAL) {
+ return ExperimentalRegExp::OneshotExec(isolate, regexp, subject,
+ previous_index, last_match_info);
+ } else if (res == RegExp::RE_EXCEPTION) {
DCHECK(isolate->has_pending_exception());
return MaybeHandle<Object>();
+ } else {
+ DCHECK(res == RegExp::RE_FAILURE);
+ return isolate->factory()->null_value();
}
- DCHECK(res == RegExp::RE_FAILURE);
- return isolate->factory()->null_value();
}
// static
@@ -740,15 +772,15 @@ bool RegExp::CompileForTesting(Isolate* isolate, Zone* zone,
Handle<String> pattern,
Handle<String> sample_subject,
bool is_one_byte) {
+ uint32_t backtrack_limit = JSRegExp::kNoBacktrackLimit;
return RegExpImpl::Compile(isolate, zone, data, flags, pattern,
- sample_subject, is_one_byte,
- JSRegExp::kNoBacktrackLimit);
+ sample_subject, is_one_byte, backtrack_limit);
}
bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
JSRegExp::Flags flags, Handle<String> pattern,
Handle<String> sample_subject, bool is_one_byte,
- uint32_t backtrack_limit) {
+ uint32_t& backtrack_limit) {
if (JSRegExp::RegistersForCaptureCount(data->capture_count) >
RegExpMacroAssembler::kMaxRegisterCount) {
data->error = RegExpError::kTooLarge;
@@ -825,7 +857,21 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
}
macro_assembler->set_slow_safe(TooMuchRegExpCode(isolate, pattern));
- macro_assembler->set_backtrack_limit(backtrack_limit);
+ if (FLAG_enable_experimental_regexp_engine_on_excessive_backtracks &&
+ ExperimentalRegExp::CanBeHandled(data->tree, flags,
+ data->capture_count)) {
+ if (backtrack_limit == JSRegExp::kNoBacktrackLimit) {
+ backtrack_limit = FLAG_regexp_backtracks_before_fallback;
+ } else {
+ backtrack_limit =
+ std::min(backtrack_limit, FLAG_regexp_backtracks_before_fallback);
+ }
+ macro_assembler->set_backtrack_limit(backtrack_limit);
+ macro_assembler->set_can_fallback(true);
+ } else {
+ macro_assembler->set_backtrack_limit(backtrack_limit);
+ macro_assembler->set_can_fallback(false);
+ }
// Inserted here, instead of in Assembler, because it depends on information
// in the AST that isn't replicated in the Node structure.
@@ -1014,8 +1060,8 @@ int32_t* RegExpGlobalCache::FetchNext() {
DCHECK(ExperimentalRegExp::IsCompiled(regexp_, isolate_));
DisallowHeapAllocation no_gc;
num_matches_ = ExperimentalRegExp::ExecRaw(
- isolate_, *regexp_, *subject_, register_array_,
- register_array_size_, last_end_index);
+ isolate_, RegExp::kFromRuntime, *regexp_, *subject_,
+ register_array_, register_array_size_, last_end_index);
break;
}
case JSRegExp::IRREGEXP: {
@@ -1035,7 +1081,16 @@ int32_t* RegExpGlobalCache::FetchNext() {
}
}
- if (num_matches_ <= 0) return nullptr;
+  // Fall back to the experimental engine if needed and possible.
+ if (num_matches_ == RegExp::kInternalRegExpFallbackToExperimental) {
+ num_matches_ = ExperimentalRegExp::OneshotExecRaw(
+ isolate_, regexp_, subject_, register_array_, register_array_size_,
+ last_end_index);
+ }
+
+ if (num_matches_ <= 0) {
+ return nullptr;
+ }
current_match_index_ = 0;
return register_array_;
} else {
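
The clamping applied to the backtrack limit in RegExpImpl::Compile above treats zero as the "no limit" sentinel (see the STATIC_ASSERT on JSRegExp::kNoBacktrackLimit in the interpreter hunk) and caps any user-provided limit at the flag-controlled fallback threshold. A standalone restatement of that arithmetic; the numbers in main() are illustrative only:

  #include <algorithm>
  #include <cassert>
  #include <cstdint>

  // Mirrors the limit selection when irregexp is allowed to fall back:
  // 0 means "no user limit", so the fallback threshold applies directly;
  // otherwise the smaller of the two wins.
  uint32_t EffectiveBacktrackLimit(uint32_t user_limit,
                                   uint32_t backtracks_before_fallback) {
    if (user_limit == 0) return backtracks_before_fallback;
    return std::min(user_limit, backtracks_before_fallback);
  }

  int main() {
    assert(EffectiveBacktrackLimit(0, 50000) == 50000);
    assert(EffectiveBacktrackLimit(1000, 50000) == 1000);
    assert(EffectiveBacktrackLimit(80000, 50000) == 50000);
    return 0;
  }
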
diff --git a/deps/v8/src/regexp/regexp.h b/deps/v8/src/regexp/regexp.h
index a6a3a8f003..3e20b5f80c 100644
--- a/deps/v8/src/regexp/regexp.h
+++ b/deps/v8/src/regexp/regexp.h
@@ -92,16 +92,25 @@ class RegExp final : public AllStatic {
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
int index, Handle<RegExpMatchInfo> last_match_info);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ ExperimentalOneshotExec(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index,
+ Handle<RegExpMatchInfo> last_match_info);
+
// Integral return values used throughout regexp code layers.
static constexpr int kInternalRegExpFailure = 0;
static constexpr int kInternalRegExpSuccess = 1;
static constexpr int kInternalRegExpException = -1;
static constexpr int kInternalRegExpRetry = -2;
+ static constexpr int kInternalRegExpFallbackToExperimental = -3;
+ static constexpr int kInternalRegExpSmallestResult = -3;
enum IrregexpResult : int32_t {
RE_FAILURE = kInternalRegExpFailure,
RE_SUCCESS = kInternalRegExpSuccess,
RE_EXCEPTION = kInternalRegExpException,
+ RE_RETRY = kInternalRegExpRetry,
+ RE_FALLBACK_TO_EXPERIMENTAL = kInternalRegExpFallbackToExperimental,
};
// Set last match info. If match is nullptr, then setting captures is
@@ -129,6 +138,8 @@ class RegExp final : public AllStatic {
RegExpError error);
static void ThrowRegExpException(Isolate* isolate, Handle<JSRegExp> re,
RegExpError error_text);
+
+ static bool IsUnmodifiedRegExp(Isolate* isolate, Handle<JSRegExp> regexp);
};
// Uses a special global mode of irregexp-generated code to perform a global
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index b574be8d74..9d2e62e1cb 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -137,6 +137,7 @@ RegExpMacroAssemblerS390::~RegExpMacroAssemblerS390() {
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
internal_failure_label_.Unuse();
+ fallback_label_.Unuse();
}
int RegExpMacroAssemblerS390::stack_limit_slack() {
@@ -174,8 +175,13 @@ void RegExpMacroAssemblerS390::Backtrack() {
__ CmpLogicalP(r2, Operand(backtrack_limit()));
__ bne(&next);
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+    // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -949,6 +955,12 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ b(&return_r2);
}
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ LoadImmP(r2, Operand(FALLBACK_TO_EXPERIMENTAL));
+ __ b(&return_r2);
+ }
+
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
Handle<Code> code =
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index e4f88f51b9..a01d409279 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -197,6 +197,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
Label check_preempt_label_;
Label stack_overflow_label_;
Label internal_failure_label_;
+ Label fallback_label_;
};
// Set of non-volatile registers saved/restored by generated regexp code.
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index da0397689f..79574ca993 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -125,6 +125,7 @@ RegExpMacroAssemblerX64::~RegExpMacroAssemblerX64() {
exit_label_.Unuse();
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
+ fallback_label_.Unuse();
}
@@ -157,8 +158,13 @@ void RegExpMacroAssemblerX64::Backtrack() {
__ cmpq(Operand(rbp, kBacktrackCount), Immediate(backtrack_limit()));
__ j(not_equal, &next);
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+    // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -1000,6 +1006,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ jmp(&return_rax);
}
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ Set(rax, FALLBACK_TO_EXPERIMENTAL);
+ __ jmp(&return_rax);
+ }
+
FixupCodeRelativePositions();
CodeDesc code_desc;
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index ea4d45edba..517a05d939 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -248,6 +248,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
+ Label fallback_label_;
};
} // namespace internal
diff --git a/deps/v8/src/roots/DIR_METADATA b/deps/v8/src/roots/DIR_METADATA
new file mode 100644
index 0000000000..ff55846b31
--- /dev/null
+++ b/deps/v8/src/roots/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>GC"
+} \ No newline at end of file
diff --git a/deps/v8/src/roots/OWNERS b/deps/v8/src/roots/OWNERS
index 2d6e1ae7c2..aaffe920bb 100644
--- a/deps/v8/src/roots/OWNERS
+++ b/deps/v8/src/roots/OWNERS
@@ -7,5 +7,3 @@ jkummerow@chromium.org
marja@chromium.org
sigurds@chromium.org
ulan@chromium.org
-
-# COMPONENT: Blink>JavaScript>GC
diff --git a/deps/v8/src/roots/roots.h b/deps/v8/src/roots/roots.h
index 27f2f5792a..744176e35e 100644
--- a/deps/v8/src/roots/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -85,7 +85,6 @@ class Symbol;
V(Map, bytecode_array_map, BytecodeArrayMap) \
V(Map, code_data_container_map, CodeDataContainerMap) \
V(Map, coverage_info_map, CoverageInfoMap) \
- V(Map, descriptor_array_map, DescriptorArrayMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Map, global_dictionary_map, GlobalDictionaryMap) \
V(Map, many_closures_cell_map, ManyClosuresCellMap) \
@@ -165,11 +164,13 @@ class Symbol;
EmptyClosureFeedbackCellArray) \
V(NumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
- V(FixedArray, empty_ordered_hash_map, EmptyOrderedHashMap) \
- V(FixedArray, empty_ordered_hash_set, EmptyOrderedHashSet) \
+ V(OrderedHashMap, empty_ordered_hash_map, EmptyOrderedHashMap) \
+ V(OrderedHashSet, empty_ordered_hash_set, EmptyOrderedHashSet) \
V(FeedbackMetadata, empty_feedback_metadata, EmptyFeedbackMetadata) \
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(NameDictionary, empty_property_dictionary, EmptyPropertyDictionary) \
+ V(OrderedNameDictionary, empty_ordered_property_dictionary, \
+ EmptyOrderedPropertyDictionary) \
V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
V(WeakFixedArray, empty_weak_fixed_array, EmptyWeakFixedArray) \
V(WeakArrayList, empty_weak_array_list, EmptyWeakArrayList) \
diff --git a/deps/v8/src/runtime/DIR_METADATA b/deps/v8/src/runtime/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/runtime/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+} \ No newline at end of file
diff --git a/deps/v8/src/runtime/OWNERS b/deps/v8/src/runtime/OWNERS
index f52e1c9ca8..48d72aea5e 100644
--- a/deps/v8/src/runtime/OWNERS
+++ b/deps/v8/src/runtime/OWNERS
@@ -1,3 +1 @@
file:../../COMMON_OWNERS
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 3e72d5e816..623064fd8a 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -47,13 +47,8 @@ RUNTIME_FUNCTION(Runtime_NewArray) {
DCHECK_LE(3, args.length());
int const argc = args.length() - 3;
// argv points to the arguments constructed by the JavaScript call.
-#ifdef V8_REVERSE_JSARGS
JavaScriptArguments argv(argc, args.address_of_arg_at(0));
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, argc);
-#else
- JavaScriptArguments argv(argc, args.address_of_arg_at(1));
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
-#endif
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, argc + 1);
CONVERT_ARG_HANDLE_CHECKED(HeapObject, type_info, argc + 2);
// TODO(bmeurer): Use MaybeHandle to pass around the AllocationSite.
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 85b4ca767a..fa647b2c04 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
#include <stdlib.h>
+
#include <limits>
#include "src/builtins/accessors.h"
#include "src/common/message-template.h"
#include "src/debug/debug.h"
#include "src/execution/arguments-inl.h"
+#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
@@ -20,6 +20,7 @@
#include "src/objects/lookup-inl.h"
#include "src/objects/smi.h"
#include "src/objects/struct-inl.h"
+#include "src/runtime/runtime-utils.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -138,8 +139,9 @@ inline void SetHomeObject(Isolate* isolate, JSFunction method,
if (method.shared().needs_home_object()) {
const InternalIndex kPropertyIndex(
JSFunction::kMaybeHomeObjectDescriptorIndex);
- CHECK_EQ(method.map().instance_descriptors().GetKey(kPropertyIndex),
- ReadOnlyRoots(isolate).home_object_symbol());
+ CHECK_EQ(
+ method.map().instance_descriptors(kRelaxedLoad).GetKey(kPropertyIndex),
+ ReadOnlyRoots(isolate).home_object_symbol());
FieldIndex field_index =
FieldIndex::ForDescriptor(method.map(), kPropertyIndex);
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index c20d2d69f2..898279cdb6 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -60,8 +60,29 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
namespace {
-inline bool MaybeSpawnNativeContextIndependentCompilationJob() {
- return FLAG_turbo_nci && !FLAG_turbo_nci_as_midtier;
+// Returns false iff an exception was thrown.
+bool MaybeSpawnNativeContextIndependentCompilationJob(
+ Handle<JSFunction> function, ConcurrencyMode mode) {
+ if (!FLAG_turbo_nci || FLAG_turbo_nci_as_midtier) {
+ return true; // Nothing to do.
+ }
+
+ // If delayed codegen is enabled, the first optimization request does not
+ // trigger NCI compilation, since we try to avoid compiling Code that
+ // remains unused in the future. Repeated optimization (possibly in
+ // different native contexts) is taken as a signal that this SFI will
+  // continue to be used in the future, so we trigger NCI compilation.
+ if (!FLAG_turbo_nci_delayed_codegen ||
+ function->shared().has_optimized_at_least_once()) {
+ if (!Compiler::CompileOptimized(function, mode,
+ CodeKind::NATIVE_CONTEXT_INDEPENDENT)) {
+ return false;
+ }
+ } else {
+ function->shared().set_has_optimized_at_least_once(true);
+ }
+
+ return true;
}
Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
@@ -77,20 +98,8 @@ Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
}
// Possibly compile for NCI caching.
- if (MaybeSpawnNativeContextIndependentCompilationJob()) {
- // The first optimization request does not trigger NCI compilation,
- // since we try to avoid compiling Code that remains unused in the future.
- // Repeated optimization (possibly in different native contexts) is taken
- // as a signal that this SFI will continue to be used in the future, thus
- // we trigger NCI compilation.
- if (function->shared().has_optimized_at_least_once()) {
- if (!Compiler::CompileOptimized(function, mode,
- CodeKind::NATIVE_CONTEXT_INDEPENDENT)) {
- return ReadOnlyRoots(isolate).exception();
- }
- } else {
- function->shared().set_has_optimized_at_least_once(true);
- }
+ if (!MaybeSpawnNativeContextIndependentCompilationJob(function, mode)) {
+ return ReadOnlyRoots(isolate).exception();
}
DCHECK(function->is_compiled());
@@ -132,7 +141,7 @@ RUNTIME_FUNCTION(Runtime_FunctionFirstExecution) {
return function->code();
}
-RUNTIME_FUNCTION(Runtime_EvictOptimizedCodeSlot) {
+RUNTIME_FUNCTION(Runtime_HealOptimizedCodeSlot) {
SealHandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
@@ -140,7 +149,7 @@ RUNTIME_FUNCTION(Runtime_EvictOptimizedCodeSlot) {
DCHECK(function->shared().is_compiled());
function->feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
- function->shared(), "Runtime_EvictOptimizedCodeSlot");
+ function->shared(), "Runtime_HealOptimizedCodeSlot");
return function->code();
}
@@ -299,6 +308,14 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
PrintF(scope.file(), " at AST id %d]\n", ast_id.ToInt());
}
maybe_result = Compiler::GetOptimizedCodeForOSR(function, ast_id, frame);
+
+ // Possibly compile for NCI caching.
+ if (!MaybeSpawnNativeContextIndependentCompilationJob(
+ function, FLAG_concurrent_recompilation
+ ? ConcurrencyMode::kConcurrent
+ : ConcurrencyMode::kNotConcurrent)) {
+ return Object();
+ }
}
// Check whether we ended up with usable optimized code.
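
The refactored MaybeSpawnNativeContextIndependentCompilationJob above implements a "compile on the second request" policy when delayed codegen is enabled: the first optimization request only records has_optimized_at_least_once, and later requests actually spawn the NCI compile. A minimal standalone sketch of that policy (the struct and function names here are illustrative, not V8 API):

  #include <cassert>

  struct SharedInfo {
    bool optimized_at_least_once = false;
  };

  // Returns true when an NCI compile should be spawned for this request.
  bool ShouldSpawnNciJob(SharedInfo& shared, bool delayed_codegen) {
    if (!delayed_codegen) return true;                // Always compile.
    if (shared.optimized_at_least_once) return true;  // Second request onwards.
    shared.optimized_at_least_once = true;            // Remember the first one.
    return false;
  }

  int main() {
    SharedInfo shared;
    assert(!ShouldSpawnNciJob(shared, /*delayed_codegen=*/true));
    assert(ShouldSpawnNciJob(shared, /*delayed_codegen=*/true));
    return 0;
  }
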
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 62ec1fdc24..175e81829c 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -859,7 +859,7 @@ RUNTIME_FUNCTION(Runtime_ProfileCreateSnapshotDataBlob) {
{
i::EmbeddedData d = i::EmbeddedData::FromBlob();
PrintF("Embedded blob is %d bytes\n",
- static_cast<int>(d.code_size() + d.metadata_size()));
+ static_cast<int>(d.code_size() + d.data_size()));
}
FreeCurrentEmbeddedBlob();
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index c38f6e1e4c..3a9075fc7d 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -30,7 +30,7 @@ bool HasBoilerplate(Handle<Object> literal_site) {
void PreInitializeLiteralSite(Handle<FeedbackVector> vector,
FeedbackSlot slot) {
- vector->Set(slot, Smi::FromInt(1));
+ vector->SynchronizedSet(slot, Smi::FromInt(1));
}
enum DeepCopyHints { kNoHints = 0, kObjectIsShallow = 1 };
@@ -110,7 +110,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
if (!copy->IsJSArray(isolate)) {
if (copy->HasFastProperties(isolate)) {
Handle<DescriptorArray> descriptors(
- copy->map(isolate).instance_descriptors(isolate), isolate);
+ copy->map(isolate).instance_descriptors(isolate, kRelaxedLoad),
+ isolate);
for (InternalIndex i : copy->map(isolate).IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
DCHECK_EQ(kField, details.location());
@@ -567,7 +568,7 @@ MaybeHandle<JSObject> CreateLiteral(Isolate* isolate,
JSObject);
creation_context.ExitScope(site, boilerplate);
- vector->Set(literals_slot, *site);
+ vector->SynchronizedSet(literals_slot, *site);
}
STATIC_ASSERT(static_cast<int>(ObjectLiteral::kDisableMementos) ==
@@ -677,7 +678,7 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
PreInitializeLiteralSite(vector, literal_slot);
return *boilerplate;
}
- vector->Set(literal_slot, *boilerplate);
+ vector->SynchronizedSet(literal_slot, *boilerplate);
return *JSRegExp::Copy(boilerplate);
}
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 04b195b31e..38349bd507 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -13,15 +13,6 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_IsValidSmi) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
-
- CONVERT_NUMBER_CHECKED(int32_t, number, Int32, args[0]);
- return isolate->heap()->ToBoolean(Smi::IsValid(number));
-}
-
-
RUNTIME_FUNCTION(Runtime_StringToNumber) {
HandleScope handle_scope(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 41dea0fe44..993adf47dd 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -108,8 +108,8 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
int nof = receiver_map->NumberOfOwnDescriptors();
if (nof == 0) return false;
InternalIndex descriptor(nof - 1);
- Handle<DescriptorArray> descriptors(receiver_map->instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> descriptors(
+ receiver_map->instance_descriptors(kRelaxedLoad), isolate);
if (descriptors->GetKey(descriptor) != *key) return false;
// (3) The property to be deleted must be deletable.
PropertyDetails details = descriptors->GetDetails(descriptor);
@@ -859,9 +859,7 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTrackingForMap) {
RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- if (!object->IsJSObject()) return Smi::zero();
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, js_object, 0);
// It could have been a DCHECK but we call this function directly from tests.
if (!js_object->map().is_deprecated()) return Smi::zero();
// This call must not cause lazy deopts, because it's called from deferred
@@ -869,7 +867,7 @@ RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
// ID. So we just try migration and signal failure if necessary,
// which will also trigger a deopt.
if (!JSObject::TryMigrateInstance(isolate, js_object)) return Smi::zero();
- return *object;
+ return *js_object;
}
static bool IsValidAccessor(Isolate* isolate, Handle<Object> obj) {
@@ -1070,7 +1068,8 @@ RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedProperties) {
// If source is undefined or null, throw a non-coercible error.
if (source->IsNullOrUndefined(isolate)) {
- return ErrorUtils::ThrowLoadFromNullOrUndefined(isolate, source);
+ return ErrorUtils::ThrowLoadFromNullOrUndefined(isolate, source,
+ MaybeHandle<Object>());
}
ScopedVector<Handle<Object>> excluded_properties(args.length() - 1);
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 994d6e3710..f6d76a1ecc 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -877,6 +877,23 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
isolate, RegExp::Exec(isolate, regexp, subject, index, last_match_info));
}
+RUNTIME_FUNCTION(Runtime_RegExpExperimentalOneshotExec) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
+ CONVERT_INT32_ARG_CHECKED(index, 2);
+ CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
+  // Due to the way the JS calls are constructed, this must be less than the
+ // length of a string, i.e. it is always a Smi. We check anyway for security.
+ CHECK_LE(0, index);
+ CHECK_GE(subject->length(), index);
+ isolate->counters()->regexp_entry_runtime()->Increment();
+ RETURN_RESULT_OR_FAILURE(
+ isolate, RegExp::ExperimentalOneshotExec(isolate, regexp, subject, index,
+ last_match_info));
+}
+
namespace {
class MatchInfoBackedMatch : public String::Match {
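The new Runtime_RegExpExperimentalOneshotExec mirrors Runtime_RegExpExec above and enforces the same index contract before entering the experimental engine. A minimal sketch of that contract (the helper is illustrative, not V8 code); note that an index equal to the subject length is allowed, so a pattern can still match the empty string at the very end of the subject:

#include <cstdint>

// Hypothetical helper: the CHECKs above amount to 0 <= index <= length.
inline bool IsValidRegExpStartIndex(int32_t index, int32_t subject_length) {
  return index >= 0 && index <= subject_length;
}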
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 36a48ae513..ed16900abf 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -14,12 +14,11 @@
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime-utils.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 66e522e72e..41e34aaff6 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
#include <memory>
#include <sstream>
@@ -29,6 +27,8 @@
#include "src/objects/js-function-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/smi.h"
+#include "src/regexp/regexp.h"
+#include "src/runtime/runtime-utils.h"
#include "src/snapshot/snapshot.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/ostreams.h"
@@ -254,7 +254,7 @@ RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
RUNTIME_FUNCTION(Runtime_DynamicMapChecksEnabled) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- return isolate->heap()->ToBoolean(FLAG_dynamic_map_checks);
+ return isolate->heap()->ToBoolean(FLAG_turboprop_dynamic_map_checks);
}
RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
@@ -551,7 +551,7 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
if (function->IsMarkedForOptimization()) {
status |= static_cast<int>(OptimizationStatus::kMarkedForOptimization);
- } else if (function->IsInOptimizationQueue()) {
+ } else if (function->IsMarkedForConcurrentOptimization()) {
status |=
static_cast<int>(OptimizationStatus::kMarkedForConcurrentOptimization);
} else if (function->IsInOptimizationQueue()) {
@@ -1090,6 +1090,16 @@ RUNTIME_FUNCTION(Runtime_HaveSameMap) {
return isolate->heap()->ToBoolean(obj1.map() == obj2.map());
}
+RUNTIME_FUNCTION(Runtime_InLargeObjectSpace) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(HeapObject, obj, 0);
+ return isolate->heap()->ToBoolean(
+ isolate->heap()->new_lo_space()->Contains(obj) ||
+ isolate->heap()->code_lo_space()->Contains(obj) ||
+ isolate->heap()->lo_space()->Contains(obj));
+}
+
RUNTIME_FUNCTION(Runtime_HasElementsInALargeObjectSpace) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -1125,7 +1135,8 @@ RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
namespace {
v8::ModifyCodeGenerationFromStringsResult DisallowCodegenFromStringsCallback(
- v8::Local<v8::Context> context, v8::Local<v8::Value> source) {
+ v8::Local<v8::Context> context, v8::Local<v8::Value> source,
+ bool is_code_kind) {
return {false, {}};
}
@@ -1278,6 +1289,14 @@ RUNTIME_FUNCTION(Runtime_RegexpTypeTag) {
return *isolate->factory()->NewStringFromAsciiChecked(type_str);
}
+RUNTIME_FUNCTION(Runtime_RegexpIsUnmodified) {
+ HandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ return isolate->heap()->ToBoolean(
+ RegExp::IsUnmodifiedRegExp(isolate, regexp));
+}
+
#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
RUNTIME_FUNCTION(Runtime_Has##Name) { \
CONVERT_ARG_CHECKED(JSObject, obj, 0); \
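DisallowCodegenFromStringsCallback above now takes a third bool parameter (is_code_kind). A hedged sketch of an embedder-side callback with the same three-argument shape; the function name is hypothetical and the extra parameter is simply ignored here:

#include <v8.h>

// Illustrative only: permits eval()/Function() and leaves the source string
// unmodified, regardless of is_code_kind.
v8::ModifyCodeGenerationFromStringsResult AllowCodegenFromStrings(
    v8::Local<v8::Context> context, v8::Local<v8::Value> source,
    bool is_code_kind) {
  return {true, {}};
}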
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 04cb59393f..76753b97fb 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -137,7 +137,7 @@ RUNTIME_FUNCTION(Runtime_ThrowWasmStackOverflow) {
return isolate->StackOverflow();
}
-RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
+RUNTIME_FUNCTION(Runtime_WasmThrowJSTypeError) {
// This runtime function is called both from wasm and from e.g. js-to-js
// functions. Hence the "thread in wasm" flag can be either set or not. Both
// is OK, since throwing will trigger unwinding anyway, which sets the flag
@@ -145,7 +145,7 @@ RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kWasmTrapTypeError));
+ isolate, NewTypeError(MessageTemplate::kWasmTrapJSTypeError));
}
RUNTIME_FUNCTION(Runtime_WasmThrowCreate) {
@@ -213,6 +213,42 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
return Object(entrypoint);
}
+RUNTIME_FUNCTION(Runtime_WasmCompileWrapper) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ CONVERT_ARG_HANDLE_CHECKED(WasmExportedFunctionData, function_data, 1);
+ DCHECK(isolate->context().is_null());
+ isolate->set_context(instance->native_context());
+
+ const wasm::WasmModule* module = instance->module();
+ const int function_index = function_data->function_index();
+ const wasm::WasmFunction function = module->functions[function_index];
+ const wasm::FunctionSig* sig = function.sig;
+
+ MaybeHandle<WasmExternalFunction> maybe_result =
+ WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
+ function_index);
+
+ Handle<WasmExternalFunction> result;
+ if (!maybe_result.ToHandle(&result)) {
+ // We expect the result to be empty in the case of the start function,
+ // which is not an exported function to begin with.
+ DCHECK_EQ(function_index, module->start_function_index);
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ Handle<Code> wrapper =
+ wasm::JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
+ isolate, sig, module);
+
+ result->set_code(*wrapper);
+
+ function_data->set_wrapper_code(*wrapper);
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_WasmTriggerTierUp) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -227,32 +263,20 @@ RUNTIME_FUNCTION(Runtime_WasmTriggerTierUp) {
return ReadOnlyRoots(isolate).undefined_value();
}
-// Should be called from within a handle scope
-Handle<JSArrayBuffer> GetArrayBuffer(Handle<WasmInstanceObject> instance,
- Isolate* isolate, uint32_t address) {
- DCHECK(instance->has_memory_object());
- Handle<JSArrayBuffer> array_buffer(instance->memory_object().array_buffer(),
- isolate);
-
- // Should have trapped if address was OOB
- DCHECK_LT(address, array_buffer->byte_length());
- return array_buffer;
-}
-
RUNTIME_FUNCTION(Runtime_WasmAtomicNotify) {
ClearThreadInWasmScope clear_wasm_flag;
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(offset_double, 1);
+ uintptr_t offset = static_cast<uintptr_t>(offset_double);
CONVERT_NUMBER_CHECKED(uint32_t, count, Uint32, args[2]);
- Handle<JSArrayBuffer> array_buffer =
- GetArrayBuffer(instance, isolate, address);
- if (array_buffer->is_shared()) {
- return FutexEmulation::Wake(array_buffer, address, count);
- } else {
- return Smi::FromInt(0);
- }
+ Handle<JSArrayBuffer> array_buffer{instance->memory_object().array_buffer(),
+ isolate};
+ // Should have trapped if address was OOB.
+ DCHECK_LT(offset, array_buffer->byte_length());
+ if (!array_buffer->is_shared()) return Smi::FromInt(0);
+ return FutexEmulation::Wake(array_buffer, offset, count);
}
RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
@@ -260,18 +284,21 @@ RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(offset_double, 1);
+ uintptr_t offset = static_cast<uintptr_t>(offset_double);
CONVERT_NUMBER_CHECKED(int32_t, expected_value, Int32, args[2]);
CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
- Handle<JSArrayBuffer> array_buffer =
- GetArrayBuffer(instance, isolate, address);
+ Handle<JSArrayBuffer> array_buffer{instance->memory_object().array_buffer(),
+ isolate};
+ // Should have trapped if address was OOB.
+ DCHECK_LT(offset, array_buffer->byte_length());
- // Trap if memory is not shared
+ // Trap if memory is not shared.
if (!array_buffer->is_shared()) {
return ThrowWasmError(isolate, MessageTemplate::kAtomicsWaitNotAllowed);
}
- return FutexEmulation::WaitWasm32(isolate, array_buffer, address,
+ return FutexEmulation::WaitWasm32(isolate, array_buffer, offset,
expected_value, timeout_ns->AsInt64());
}
@@ -280,18 +307,21 @@ RUNTIME_FUNCTION(Runtime_WasmI64AtomicWait) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(offset_double, 1);
+ uintptr_t offset = static_cast<uintptr_t>(offset_double);
CONVERT_ARG_HANDLE_CHECKED(BigInt, expected_value, 2);
CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
- Handle<JSArrayBuffer> array_buffer =
- GetArrayBuffer(instance, isolate, address);
+ Handle<JSArrayBuffer> array_buffer{instance->memory_object().array_buffer(),
+ isolate};
+ // Should have trapped if address was OOB.
+ DCHECK_LT(offset, array_buffer->byte_length());
- // Trap if memory is not shared
+ // Trap if memory is not shared.
if (!array_buffer->is_shared()) {
return ThrowWasmError(isolate, MessageTemplate::kAtomicsWaitNotAllowed);
}
- return FutexEmulation::WaitWasm64(isolate, array_buffer, address,
+ return FutexEmulation::WaitWasm64(isolate, array_buffer, offset,
expected_value->AsInt64(),
timeout_ns->AsInt64());
}
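The atomics entry points above now receive the memory offset as a double and narrow it to uintptr_t instead of converting through a uint32. A minimal sketch of the narrowing step under the same precondition the DCHECKs document (the value is a non-negative integer already bounds-checked against the buffer length); the helper name is illustrative:

#include <cstdint>

// Hypothetical helper mirroring the pattern above. Valid only when
// offset_double is an exact non-negative integer within the address range,
// which the wasm bounds checks establish before the runtime call.
inline uintptr_t WasmOffsetFromDouble(double offset_double) {
  return static_cast<uintptr_t>(offset_double);
}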
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 667b1f0045..a0041ec2c0 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -108,7 +108,7 @@ namespace internal {
F(CompileLazy, 1, 1) \
F(CompileOptimized_Concurrent, 1, 1) \
F(CompileOptimized_NotConcurrent, 1, 1) \
- F(EvictOptimizedCodeSlot, 1, 1) \
+ F(HealOptimizedCodeSlot, 1, 1) \
F(FunctionFirstExecution, 1, 1) \
F(InstantiateAsmJs, 4, 1) \
F(NotifyDeoptimized, 0, 1) \
@@ -276,7 +276,6 @@ namespace internal {
F(GetHoleNaNLower, 0, 1) \
F(GetHoleNaNUpper, 0, 1) \
I(IsSmi, 1, 1) \
- F(IsValidSmi, 1, 1) \
F(MaxSmi, 0, 1) \
F(NumberToStringSlow, 1, 1) \
F(StringParseFloat, 1, 1) \
@@ -388,6 +387,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_REGEXP(F, I) \
I(IsRegExp, 1, 1) \
F(RegExpExec, 4, 1) \
+ F(RegExpExperimentalOneshotExec, 4, 1) \
F(RegExpExecMultiple, 4, 1) \
F(RegExpInitializeAndCompile, 3, 1) \
F(RegExpReplaceRT, 3, 1) \
@@ -507,6 +507,7 @@ namespace internal {
F(HaveSameMap, 2, 1) \
F(HeapObjectVerify, 1, 1) \
F(ICsAreEnabled, 0, 1) \
+ F(InLargeObjectSpace, 1, 1) \
F(InYoungGeneration, 1, 1) \
F(IsAsmWasmCode, 1, 1) \
F(IsBeingInterpreted, 0, 1) \
@@ -518,6 +519,7 @@ namespace internal {
F(RegexpHasBytecode, 2, 1) \
F(RegexpHasNativeCode, 2, 1) \
F(RegexpTypeTag, 1, 1) \
+ F(RegexpIsUnmodified, 1, 1) \
F(MapIteratorProtector, 0, 1) \
F(NeverOptimizeFunction, 1, 1) \
F(NotifyContextDisposed, 0, 1) \
@@ -569,7 +571,7 @@ namespace internal {
F(WasmMemoryGrow, 2, 1) \
F(WasmStackGuard, 0, 1) \
F(WasmThrowCreate, 2, 1) \
- F(WasmThrowTypeError, 0, 1) \
+ F(WasmThrowJSTypeError, 0, 1) \
F(WasmRefFunc, 1, 1) \
F(WasmFunctionTableGet, 3, 1) \
F(WasmFunctionTableSet, 4, 1) \
@@ -579,6 +581,7 @@ namespace internal {
F(WasmTableFill, 4, 1) \
F(WasmIsValidRefValue, 3, 1) \
F(WasmCompileLazy, 2, 1) \
+ F(WasmCompileWrapper, 2, 1) \
F(WasmTriggerTierUp, 1, 1) \
F(WasmDebugBreak, 0, 1) \
F(WasmAllocateRtt, 2, 1)
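Each F(...)/I(...) entry in these lists carries (Name, number of arguments, result size) and is consumed X-macro style. A sketch of one possible consumer, assuming this header is available; PRINT_INTRINSIC and PrintRegExpIntrinsics are hypothetical names, not part of V8:

#include <cstdio>

#define PRINT_INTRINSIC(Name, nargs, ressize) \
  std::printf("Runtime_%s: %d arg(s), %d result\n", #Name, nargs, ressize);

void PrintRegExpIntrinsics() {
  // Expands to one printf per entry; F and I entries are treated alike here.
  FOR_EACH_INTRINSIC_REGEXP(PRINT_INTRINSIC, PRINT_INTRINSIC)
}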
diff --git a/deps/v8/src/snapshot/DIR_METADATA b/deps/v8/src/snapshot/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/snapshot/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
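.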
diff --git a/deps/v8/src/snapshot/OWNERS b/deps/v8/src/snapshot/OWNERS
index 6fa7f3441c..0cf6544300 100644
--- a/deps/v8/src/snapshot/OWNERS
+++ b/deps/v8/src/snapshot/OWNERS
@@ -2,5 +2,3 @@ delphick@chromium.org
jgruber@chromium.org
leszeks@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 5eec7668a2..f90ef62bad 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -36,9 +36,7 @@ ScriptData::ScriptData(const byte* data, int length)
CodeSerializer::CodeSerializer(Isolate* isolate, uint32_t source_hash)
: Serializer(isolate, Snapshot::kDefaultSerializerFlags),
- source_hash_(source_hash) {
- allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
-}
+ source_hash_(source_hash) {}
// static
ScriptCompiler::CachedData* CodeSerializer::Serialize(
@@ -64,11 +62,11 @@ ScriptCompiler::CachedData* CodeSerializer::Serialize(
// Serialize code object.
Handle<String> source(String::cast(script->source()), isolate);
+ HandleScope scope(isolate);
CodeSerializer cs(isolate, SerializedCodeData::SourceHash(
source, script->origin_options()));
DisallowGarbageCollection no_gc;
- cs.reference_map()->AddAttachedReference(
- reinterpret_cast<void*>(source->ptr()));
+ cs.reference_map()->AddAttachedReference(*source);
ScriptData* script_data = cs.SerializeSharedFunctionInfo(info);
if (FLAG_profile_deserialization) {
@@ -100,13 +98,13 @@ ScriptData* CodeSerializer::SerializeSharedFunctionInfo(
return data.GetScriptData();
}
-bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
- if (!ReadOnlyHeap::Contains(obj)) return false;
+bool CodeSerializer::SerializeReadOnlyObject(Handle<HeapObject> obj) {
+ if (!ReadOnlyHeap::Contains(*obj)) return false;
// For objects on the read-only heap, never serialize the object, but instead
// create a back reference that encodes the page number as the chunk_index and
// the offset within the page as the chunk_offset.
- Address address = obj.address();
+ Address address = obj->address();
BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(address);
uint32_t chunk_index = 0;
ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
@@ -115,14 +113,13 @@ bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
++chunk_index;
}
uint32_t chunk_offset = static_cast<uint32_t>(chunk->Offset(address));
- SerializerReference back_reference = SerializerReference::BackReference(
- SnapshotSpace::kReadOnlyHeap, chunk_index, chunk_offset);
- reference_map()->Add(reinterpret_cast<void*>(obj.ptr()), back_reference);
- CHECK(SerializeBackReference(obj));
+ sink_.Put(kReadOnlyHeapRef, "ReadOnlyHeapRef");
+ sink_.PutInt(chunk_index, "ReadOnlyHeapRefChunkIndex");
+ sink_.PutInt(chunk_offset, "ReadOnlyHeapRefChunkOffset");
return true;
}
-void CodeSerializer::SerializeObject(HeapObject obj) {
+void CodeSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
if (SerializeHotObject(obj)) return;
if (SerializeRoot(obj)) return;
@@ -131,60 +128,60 @@ void CodeSerializer::SerializeObject(HeapObject obj) {
if (SerializeReadOnlyObject(obj)) return;
- CHECK(!obj.IsCode());
+ CHECK(!obj->IsCode());
ReadOnlyRoots roots(isolate());
- if (ElideObject(obj)) {
- return SerializeObject(roots.undefined_value());
+ if (ElideObject(*obj)) {
+ return SerializeObject(roots.undefined_value_handle());
}
- if (obj.IsScript()) {
- Script script_obj = Script::cast(obj);
- DCHECK_NE(script_obj.compilation_type(), Script::COMPILATION_TYPE_EVAL);
+ if (obj->IsScript()) {
+ Handle<Script> script_obj = Handle<Script>::cast(obj);
+ DCHECK_NE(script_obj->compilation_type(), Script::COMPILATION_TYPE_EVAL);
// We want to differentiate between undefined and uninitialized_symbol for
// context_data for now. It is hack to allow debugging for scripts that are
// included as a part of custom snapshot. (see debug::Script::IsEmbedded())
- Object context_data = script_obj.context_data();
+ Object context_data = script_obj->context_data();
if (context_data != roots.undefined_value() &&
context_data != roots.uninitialized_symbol()) {
- script_obj.set_context_data(roots.undefined_value());
+ script_obj->set_context_data(roots.undefined_value());
}
// We don't want to serialize host options to avoid serializing unnecessary
// object graph.
- FixedArray host_options = script_obj.host_defined_options();
- script_obj.set_host_defined_options(roots.empty_fixed_array());
+ FixedArray host_options = script_obj->host_defined_options();
+ script_obj->set_host_defined_options(roots.empty_fixed_array());
SerializeGeneric(obj);
- script_obj.set_host_defined_options(host_options);
- script_obj.set_context_data(context_data);
+ script_obj->set_host_defined_options(host_options);
+ script_obj->set_context_data(context_data);
return;
}
- if (obj.IsSharedFunctionInfo()) {
- SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
+ if (obj->IsSharedFunctionInfo()) {
+ Handle<SharedFunctionInfo> sfi = Handle<SharedFunctionInfo>::cast(obj);
// TODO(7110): Enable serializing of Asm modules once the AsmWasmData
// is context independent.
- DCHECK(!sfi.IsApiFunction() && !sfi.HasAsmWasmData());
+ DCHECK(!sfi->IsApiFunction() && !sfi->HasAsmWasmData());
DebugInfo debug_info;
BytecodeArray debug_bytecode_array;
- if (sfi.HasDebugInfo()) {
+ if (sfi->HasDebugInfo()) {
// Clear debug info.
- debug_info = sfi.GetDebugInfo();
+ debug_info = sfi->GetDebugInfo();
if (debug_info.HasInstrumentedBytecodeArray()) {
debug_bytecode_array = debug_info.DebugBytecodeArray();
- sfi.SetDebugBytecodeArray(debug_info.OriginalBytecodeArray());
+ sfi->SetDebugBytecodeArray(debug_info.OriginalBytecodeArray());
}
- sfi.set_script_or_debug_info(debug_info.script());
+ sfi->set_script_or_debug_info(debug_info.script(), kReleaseStore);
}
- DCHECK(!sfi.HasDebugInfo());
+ DCHECK(!sfi->HasDebugInfo());
SerializeGeneric(obj);
// Restore debug info
if (!debug_info.is_null()) {
- sfi.set_script_or_debug_info(debug_info);
+ sfi->set_script_or_debug_info(debug_info, kReleaseStore);
if (!debug_bytecode_array.is_null()) {
- sfi.SetDebugBytecodeArray(debug_bytecode_array);
+ sfi->SetDebugBytecodeArray(debug_bytecode_array);
}
}
return;
@@ -197,24 +194,24 @@ void CodeSerializer::SerializeObject(HeapObject obj) {
// --interpreted-frames-native-stack is on. See v8:9122 for more context
#ifndef V8_TARGET_ARCH_ARM
if (V8_UNLIKELY(FLAG_interpreted_frames_native_stack) &&
- obj.IsInterpreterData()) {
- obj = InterpreterData::cast(obj).bytecode_array();
+ obj->IsInterpreterData()) {
+ obj = handle(InterpreterData::cast(*obj).bytecode_array(), isolate());
}
#endif // V8_TARGET_ARCH_ARM
// Past this point we should not see any (context-specific) maps anymore.
- CHECK(!obj.IsMap());
+ CHECK(!obj->IsMap());
// There should be no references to the global object embedded.
- CHECK(!obj.IsJSGlobalProxy() && !obj.IsJSGlobalObject());
+ CHECK(!obj->IsJSGlobalProxy() && !obj->IsJSGlobalObject());
// Embedded FixedArrays that need rehashing must support rehashing.
- CHECK_IMPLIES(obj.NeedsRehashing(), obj.CanBeRehashed());
+ CHECK_IMPLIES(obj->NeedsRehashing(), obj->CanBeRehashed());
// We expect no instantiated function objects or contexts.
- CHECK(!obj.IsJSFunction() && !obj.IsContext());
+ CHECK(!obj->IsJSFunction() && !obj->IsContext());
SerializeGeneric(obj);
}
-void CodeSerializer::SerializeGeneric(HeapObject heap_object) {
+void CodeSerializer::SerializeGeneric(Handle<HeapObject> heap_object) {
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this, heap_object, &sink_);
serializer.Serialize();
@@ -265,26 +262,27 @@ void CreateInterpreterDataForDeserializedCode(Isolate* isolate,
namespace {
class StressOffThreadDeserializeThread final : public base::Thread {
public:
- explicit StressOffThreadDeserializeThread(LocalIsolate* local_isolate,
+ explicit StressOffThreadDeserializeThread(Isolate* isolate,
const SerializedCodeData* scd)
: Thread(
base::Thread::Options("StressOffThreadDeserializeThread", 2 * MB)),
- local_isolate_(local_isolate),
+ isolate_(isolate),
scd_(scd) {}
MaybeHandle<SharedFunctionInfo> maybe_result() const { return maybe_result_; }
void Run() final {
+ LocalIsolate local_isolate(isolate_, ThreadKind::kBackground);
MaybeHandle<SharedFunctionInfo> local_maybe_result =
ObjectDeserializer::DeserializeSharedFunctionInfoOffThread(
- local_isolate_, scd_, local_isolate_->factory()->empty_string());
+ &local_isolate, scd_, local_isolate.factory()->empty_string());
maybe_result_ =
- local_isolate_->heap()->NewPersistentMaybeHandle(local_maybe_result);
+ local_isolate.heap()->NewPersistentMaybeHandle(local_maybe_result);
}
private:
- LocalIsolate* local_isolate_;
+ Isolate* isolate_;
const SerializedCodeData* scd_;
MaybeHandle<SharedFunctionInfo> maybe_result_;
};
@@ -315,9 +313,7 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
MaybeHandle<SharedFunctionInfo> maybe_result;
// TODO(leszeks): Add LocalHeap support to deserializer
if (false && FLAG_stress_background_compile) {
- LocalIsolate local_isolate(isolate);
-
- StressOffThreadDeserializeThread thread(&local_isolate, &scd);
+ StressOffThreadDeserializeThread thread(isolate, &scd);
CHECK(thread.Start());
thread.Join();
@@ -408,44 +404,29 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
const CodeSerializer* cs) {
DisallowGarbageCollection no_gc;
- std::vector<Reservation> reservations = cs->EncodeReservations();
// Calculate sizes.
- uint32_t reservation_size =
- static_cast<uint32_t>(reservations.size()) * kUInt32Size;
- uint32_t num_stub_keys = 0; // TODO(jgruber): Remove.
- uint32_t stub_keys_size = num_stub_keys * kUInt32Size;
- uint32_t payload_offset = kHeaderSize + reservation_size + stub_keys_size;
- uint32_t padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
- uint32_t size =
- padded_payload_offset + static_cast<uint32_t>(payload->size());
+ uint32_t size = kHeaderSize + static_cast<uint32_t>(payload->size());
DCHECK(IsAligned(size, kPointerAlignment));
// Allocate backing store and create result data.
AllocateData(size);
// Zero out pre-payload data. Part of that is only used for padding.
- memset(data_, 0, padded_payload_offset);
+ memset(data_, 0, kHeaderSize);
// Set header values.
SetMagicNumber();
SetHeaderValue(kVersionHashOffset, Version::Hash());
SetHeaderValue(kSourceHashOffset, cs->source_hash());
SetHeaderValue(kFlagHashOffset, FlagList::Hash());
- SetHeaderValue(kNumReservationsOffset,
- static_cast<uint32_t>(reservations.size()));
SetHeaderValue(kPayloadLengthOffset, static_cast<uint32_t>(payload->size()));
// Zero out any padding in the header.
memset(data_ + kUnalignedHeaderSize, 0, kHeaderSize - kUnalignedHeaderSize);
- // Copy reservation chunk sizes.
- CopyBytes(data_ + kHeaderSize,
- reinterpret_cast<const byte*>(reservations.data()),
- reservation_size);
-
// Copy serialized data.
- CopyBytes(data_ + padded_payload_offset, payload->data(),
+ CopyBytes(data_ + kHeaderSize, payload->data(),
static_cast<size_t>(payload->size()));
SetHeaderValue(kChecksumOffset, Checksum(ChecksummedContent()));
@@ -464,10 +445,7 @@ SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
if (version_hash != Version::Hash()) return VERSION_MISMATCH;
if (source_hash != expected_source_hash) return SOURCE_MISMATCH;
if (flags_hash != FlagList::Hash()) return FLAGS_MISMATCH;
- uint32_t max_payload_length =
- this->size_ -
- POINTER_SIZE_ALIGN(kHeaderSize +
- GetHeaderValue(kNumReservationsOffset) * kInt32Size);
+ uint32_t max_payload_length = this->size_ - kHeaderSize;
if (payload_length > max_payload_length) return LENGTH_MISMATCH;
if (Checksum(ChecksummedContent()) != c) return CHECKSUM_MISMATCH;
return CHECK_SUCCESS;
@@ -494,20 +472,8 @@ ScriptData* SerializedCodeData::GetScriptData() {
return result;
}
-std::vector<SerializedData::Reservation> SerializedCodeData::Reservations()
- const {
- uint32_t size = GetHeaderValue(kNumReservationsOffset);
- std::vector<Reservation> reservations(size);
- memcpy(reservations.data(), data_ + kHeaderSize,
- size * sizeof(SerializedData::Reservation));
- return reservations;
-}
-
Vector<const byte> SerializedCodeData::Payload() const {
- int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
- int payload_offset = kHeaderSize + reservations_size;
- int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
- const byte* payload = data_ + padded_payload_offset;
+ const byte* payload = data_ + kHeaderSize;
DCHECK(IsAligned(reinterpret_cast<intptr_t>(payload), kPointerAlignment));
int length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + length);
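With reservations gone, the cached-code blob is simply a fixed header followed directly by the payload. An illustrative layout sketch (field order mirrors the offset constants in code-serializer.h below; the struct itself is not V8 code and omits the trailing alignment padding):

#include <cstdint>

struct CachedCodeHeaderSketch {
  uint32_t magic_number;
  uint32_t version_hash;
  uint32_t source_hash;
  uint32_t flag_hash;
  uint32_t payload_length;
  uint32_t checksum;
  // The header is then padded to pointer alignment; payload_length bytes of
  // serialized payload follow immediately after.
};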
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index 2daf5200ec..8ca9721d16 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -7,6 +7,7 @@
#include "src/base/macros.h"
#include "src/snapshot/serializer.h"
+#include "src/snapshot/snapshot-data.h"
namespace v8 {
namespace internal {
@@ -17,6 +18,8 @@ class V8_EXPORT_PRIVATE ScriptData {
~ScriptData() {
if (owns_data_) DeleteArray(data_);
}
+ ScriptData(const ScriptData&) = delete;
+ ScriptData& operator=(const ScriptData&) = delete;
const byte* data() const { return data_; }
int length() const { return length_; }
@@ -39,12 +42,12 @@ class V8_EXPORT_PRIVATE ScriptData {
bool rejected_ : 1;
const byte* data_;
int length_;
-
- DISALLOW_COPY_AND_ASSIGN(ScriptData);
};
class CodeSerializer : public Serializer {
public:
+ CodeSerializer(const CodeSerializer&) = delete;
+ CodeSerializer& operator=(const CodeSerializer&) = delete;
V8_EXPORT_PRIVATE static ScriptCompiler::CachedData* Serialize(
Handle<SharedFunctionInfo> info);
@@ -61,16 +64,15 @@ class CodeSerializer : public Serializer {
~CodeSerializer() override { OutputStatistics("CodeSerializer"); }
virtual bool ElideObject(Object obj) { return false; }
- void SerializeGeneric(HeapObject heap_object);
+ void SerializeGeneric(Handle<HeapObject> heap_object);
private:
- void SerializeObject(HeapObject o) override;
+ void SerializeObjectImpl(Handle<HeapObject> o) override;
- bool SerializeReadOnlyObject(HeapObject obj);
+ bool SerializeReadOnlyObject(Handle<HeapObject> obj);
DISALLOW_HEAP_ALLOCATION(no_gc_)
uint32_t source_hash_;
- DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};
// Wrapper around ScriptData to provide code-serializer-specific functionality.
@@ -92,18 +94,13 @@ class SerializedCodeData : public SerializedData {
// [1] version hash
// [2] source hash
// [3] flag hash
- // [4] number of reservation size entries
- // [5] payload length
- // [6] payload checksum
- // ... reservations
- // ... code stub keys
+ // [4] payload length
+ // [5] payload checksum
// ... serialized payload
static const uint32_t kVersionHashOffset = kMagicNumberOffset + kUInt32Size;
static const uint32_t kSourceHashOffset = kVersionHashOffset + kUInt32Size;
static const uint32_t kFlagHashOffset = kSourceHashOffset + kUInt32Size;
- static const uint32_t kNumReservationsOffset = kFlagHashOffset + kUInt32Size;
- static const uint32_t kPayloadLengthOffset =
- kNumReservationsOffset + kUInt32Size;
+ static const uint32_t kPayloadLengthOffset = kFlagHashOffset + kUInt32Size;
static const uint32_t kChecksumOffset = kPayloadLengthOffset + kUInt32Size;
static const uint32_t kUnalignedHeaderSize = kChecksumOffset + kUInt32Size;
static const uint32_t kHeaderSize = POINTER_SIZE_ALIGN(kUnalignedHeaderSize);
@@ -120,7 +117,6 @@ class SerializedCodeData : public SerializedData {
// Return ScriptData object and relinquish ownership over it to the caller.
ScriptData* GetScriptData();
- std::vector<Reservation> Reservations() const;
Vector<const byte> Payload() const;
static uint32_t SourceHash(Handle<String> source,
diff --git a/deps/v8/src/snapshot/context-deserializer.cc b/deps/v8/src/snapshot/context-deserializer.cc
index ae0865ee28..5ae6dcd0eb 100644
--- a/deps/v8/src/snapshot/context-deserializer.cc
+++ b/deps/v8/src/snapshot/context-deserializer.cc
@@ -5,6 +5,7 @@
#include "src/snapshot/context-deserializer.h"
#include "src/api/api-inl.h"
+#include "src/common/assert-scope.h"
#include "src/heap/heap-inl.h"
#include "src/objects/slots.h"
#include "src/snapshot/snapshot.h"
@@ -16,8 +17,7 @@ MaybeHandle<Context> ContextDeserializer::DeserializeContext(
Isolate* isolate, const SnapshotData* data, bool can_rehash,
Handle<JSGlobalProxy> global_proxy,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
- ContextDeserializer d(data);
- d.SetRehashability(can_rehash);
+ ContextDeserializer d(isolate, data, can_rehash);
MaybeHandle<Object> maybe_result =
d.Deserialize(isolate, global_proxy, embedder_fields_deserializer);
@@ -30,11 +30,6 @@ MaybeHandle<Context> ContextDeserializer::DeserializeContext(
MaybeHandle<Object> ContextDeserializer::Deserialize(
Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
- Initialize(isolate);
- if (!allocator()->ReserveSpace()) {
- V8::FatalProcessOutOfMemory(isolate, "ContextDeserializer");
- }
-
// Replace serialized references to the global proxy and its map with the
// given global proxy and its map.
AddAttachedObject(global_proxy);
@@ -42,26 +37,17 @@ MaybeHandle<Object> ContextDeserializer::Deserialize(
Handle<Object> result;
{
- DisallowGarbageCollection no_gc;
- // Keep track of the code space start and end pointers in case new
- // code objects were unserialized
- CodeSpace* code_space = isolate->heap()->code_space();
- Address start_address = code_space->top();
- Object root;
- VisitRootPointer(Root::kStartupObjectCache, nullptr, FullObjectSlot(&root));
+ // There's no code deserialized here. If this assert fires then that's
+ // changed and logging should be added to notify the profiler et al. of
+ // the new code, which also has to be flushed from instruction cache.
+ DisallowCodeAllocation no_code_allocation;
+
+ result = ReadObject();
DeserializeDeferredObjects();
DeserializeEmbedderFields(embedder_fields_deserializer);
- allocator()->RegisterDeserializedObjectsForBlackAllocation();
-
- // There's no code deserialized here. If this assert fires then that's
- // changed and logging should be added to notify the profiler et al of the
- // new code, which also has to be flushed from instruction cache.
- CHECK_EQ(start_address, code_space->top());
-
LogNewMapEvents();
-
- result = handle(root, isolate);
+ WeakenDescriptorArrays();
}
if (FLAG_rehash_snapshot && can_rehash()) Rehash();
@@ -74,6 +60,7 @@ void ContextDeserializer::SetupOffHeapArrayBufferBackingStores() {
for (Handle<JSArrayBuffer> buffer : new_off_heap_array_buffers()) {
uint32_t store_index = buffer->GetBackingStoreRefForDeserialization();
auto bs = backing_store(store_index);
+ buffer->AllocateExternalPointerEntries(isolate());
SharedFlag shared =
bs && bs->is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared;
buffer->Setup(shared, bs);
@@ -90,9 +77,7 @@ void ContextDeserializer::DeserializeEmbedderFields(
for (int code = source()->Get(); code != kSynchronize;
code = source()->Get()) {
HandleScope scope(isolate());
- SnapshotSpace space = NewObject::Decode(code);
- Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(space)),
- isolate());
+ Handle<JSObject> obj = Handle<JSObject>::cast(GetBackReferencedObject());
int index = source()->GetInt();
int size = source()->GetInt();
// TODO(yangguo,jgruber): Turn this into a reusable shared buffer.
diff --git a/deps/v8/src/snapshot/context-deserializer.h b/deps/v8/src/snapshot/context-deserializer.h
index 3854902238..6552a0fe45 100644
--- a/deps/v8/src/snapshot/context-deserializer.h
+++ b/deps/v8/src/snapshot/context-deserializer.h
@@ -6,12 +6,14 @@
#define V8_SNAPSHOT_CONTEXT_DESERIALIZER_H_
#include "src/snapshot/deserializer.h"
+#include "src/snapshot/snapshot-data.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
class Context;
+class Isolate;
// Deserializes the context-dependent object graph rooted at a given object.
// The ContextDeserializer is not expected to deserialize any code objects.
@@ -23,8 +25,10 @@ class V8_EXPORT_PRIVATE ContextDeserializer final : public Deserializer {
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer);
private:
- explicit ContextDeserializer(const SnapshotData* data)
- : Deserializer(data, false) {}
+ explicit ContextDeserializer(Isolate* isolate, const SnapshotData* data,
+ bool can_rehash)
+ : Deserializer(isolate, data->Payload(), data->GetMagicNumber(), false,
+ can_rehash) {}
// Deserialize a single object and the objects reachable from it.
MaybeHandle<Object> Deserialize(
diff --git a/deps/v8/src/snapshot/context-serializer.cc b/deps/v8/src/snapshot/context-serializer.cc
index 931ee64176..8060f2845c 100644
--- a/deps/v8/src/snapshot/context-serializer.cc
+++ b/deps/v8/src/snapshot/context-serializer.cc
@@ -74,7 +74,6 @@ ContextSerializer::ContextSerializer(
serialize_embedder_fields_(callback),
can_be_rehashed_(true) {
InitializeCodeAddressMap();
- allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
}
ContextSerializer::~ContextSerializer() {
@@ -88,10 +87,8 @@ void ContextSerializer::Serialize(Context* o,
// Upon deserialization, references to the global proxy and its map will be
// replaced.
- reference_map()->AddAttachedReference(
- reinterpret_cast<void*>(context_.global_proxy().ptr()));
- reference_map()->AddAttachedReference(
- reinterpret_cast<void*>(context_.global_proxy().map().ptr()));
+ reference_map()->AddAttachedReference(context_.global_proxy());
+ reference_map()->AddAttachedReference(context_.global_proxy().map());
// The bootstrap snapshot has a code-stub context. When serializing the
// context snapshot, it is chained into the weak context list on the isolate
@@ -123,7 +120,7 @@ void ContextSerializer::Serialize(Context* o,
Pad();
}
-void ContextSerializer::SerializeObject(HeapObject obj) {
+void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
if (!allow_active_isolate_for_testing()) {
@@ -132,7 +129,7 @@ void ContextSerializer::SerializeObject(HeapObject obj) {
// But in test scenarios there is no way to avoid this. Since we only
// serialize a single context in these cases, and this context does not
// have to be executable, we can simply ignore this.
- DCHECK_IMPLIES(obj.IsNativeContext(), obj == context_);
+ DCHECK_IMPLIES(obj->IsNativeContext(), *obj == context_);
}
if (SerializeHotObject(obj)) return;
@@ -145,7 +142,7 @@ void ContextSerializer::SerializeObject(HeapObject obj) {
return;
}
- if (ShouldBeInTheStartupObjectCache(obj)) {
+ if (ShouldBeInTheStartupObjectCache(*obj)) {
startup_serializer_->SerializeUsingStartupObjectCache(&sink_, obj);
return;
}
@@ -156,31 +153,33 @@ void ContextSerializer::SerializeObject(HeapObject obj) {
DCHECK(!startup_serializer_->ReferenceMapContains(obj));
// All the internalized strings that the context snapshot needs should be
// either in the root table or in the startup object cache.
- DCHECK(!obj.IsInternalizedString());
+ DCHECK(!obj->IsInternalizedString());
// Function and object templates are not context specific.
- DCHECK(!obj.IsTemplateInfo());
+ DCHECK(!obj->IsTemplateInfo());
// Clear literal boilerplates and feedback.
- if (obj.IsFeedbackVector()) FeedbackVector::cast(obj).ClearSlots(isolate());
+ if (obj->IsFeedbackVector()) {
+ Handle<FeedbackVector>::cast(obj)->ClearSlots(isolate());
+ }
// Clear InterruptBudget when serializing FeedbackCell.
- if (obj.IsFeedbackCell()) {
- FeedbackCell::cast(obj).SetInitialInterruptBudget();
+ if (obj->IsFeedbackCell()) {
+ Handle<FeedbackCell>::cast(obj)->SetInitialInterruptBudget();
}
if (SerializeJSObjectWithEmbedderFields(obj)) {
return;
}
- if (obj.IsJSFunction()) {
+ if (obj->IsJSFunction()) {
// Unconditionally reset the JSFunction to its SFI's code, since we can't
// serialize optimized code anyway.
- JSFunction closure = JSFunction::cast(obj);
- closure.ResetIfBytecodeFlushed();
- if (closure.is_compiled()) closure.set_code(closure.shared().GetCode());
+ Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
+ closure->ResetIfBytecodeFlushed();
+ if (closure->is_compiled()) closure->set_code(closure->shared().GetCode());
}
- CheckRehashability(obj);
+ CheckRehashability(*obj);
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this, obj, &sink_);
@@ -196,29 +195,27 @@ bool ContextSerializer::ShouldBeInTheStartupObjectCache(HeapObject o) {
return o.IsName() || o.IsSharedFunctionInfo() || o.IsHeapNumber() ||
o.IsCode() || o.IsScopeInfo() || o.IsAccessorInfo() ||
o.IsTemplateInfo() || o.IsClassPositions() ||
- o.map() == ReadOnlyRoots(startup_serializer_->isolate())
- .fixed_cow_array_map();
+ o.map() == ReadOnlyRoots(isolate()).fixed_cow_array_map();
}
namespace {
bool DataIsEmpty(const StartupData& data) { return data.raw_size == 0; }
} // anonymous namespace
-bool ContextSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
- if (!obj.IsJSObject()) return false;
- JSObject js_obj = JSObject::cast(obj);
- int embedder_fields_count = js_obj.GetEmbedderFieldCount();
+bool ContextSerializer::SerializeJSObjectWithEmbedderFields(
+ Handle<HeapObject> obj) {
+ if (!obj->IsJSObject()) return false;
+ Handle<JSObject> js_obj = Handle<JSObject>::cast(obj);
+ int embedder_fields_count = js_obj->GetEmbedderFieldCount();
if (embedder_fields_count == 0) return false;
CHECK_GT(embedder_fields_count, 0);
- DCHECK(!js_obj.NeedsRehashing());
+ DCHECK(!js_obj->NeedsRehashing());
DisallowGarbageCollection no_gc;
DisallowJavascriptExecution no_js(isolate());
DisallowCompilation no_compile(isolate());
- HandleScope scope(isolate());
- Handle<JSObject> obj_handle(js_obj, isolate());
- v8::Local<v8::Object> api_obj = v8::Utils::ToLocal(obj_handle);
+ v8::Local<v8::Object> api_obj = v8::Utils::ToLocal(js_obj);
std::vector<EmbedderDataSlot::RawData> original_embedder_values;
std::vector<StartupData> serialized_data;
@@ -228,7 +225,7 @@ bool ContextSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
// serializer. For aligned pointers, call the serialize callback. Hold
// onto the result.
for (int i = 0; i < embedder_fields_count; i++) {
- EmbedderDataSlot embedder_data_slot(js_obj, i);
+ EmbedderDataSlot embedder_data_slot(*js_obj, i);
original_embedder_values.emplace_back(
embedder_data_slot.load_raw(isolate(), no_gc));
Object object = embedder_data_slot.load_tagged();
@@ -257,7 +254,7 @@ bool ContextSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
// with embedder callbacks.
for (int i = 0; i < embedder_fields_count; i++) {
if (!DataIsEmpty(serialized_data[i])) {
- EmbedderDataSlot(js_obj, i).store_raw(isolate(), kNullAddress, no_gc);
+ EmbedderDataSlot(*js_obj, i).store_raw(isolate(), kNullAddress, no_gc);
}
}
@@ -266,9 +263,10 @@ bool ContextSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
ObjectSerializer(this, js_obj, &sink_).Serialize();
// 4) Obtain back reference for the serialized object.
- SerializerReference reference =
- reference_map()->LookupReference(reinterpret_cast<void*>(js_obj.ptr()));
- DCHECK(reference.is_back_reference());
+ const SerializerReference* reference =
+ reference_map()->LookupReference(js_obj);
+ DCHECK_NOT_NULL(reference);
+ DCHECK(reference->is_back_reference());
// 5) Write data returned by the embedder callbacks into a separate sink,
// headed by the back reference. Restore the original embedder fields.
@@ -276,13 +274,10 @@ bool ContextSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
StartupData data = serialized_data[i];
if (DataIsEmpty(data)) continue;
// Restore original values from cleared fields.
- EmbedderDataSlot(js_obj, i).store_raw(isolate(),
- original_embedder_values[i], no_gc);
- embedder_fields_sink_.Put(kNewObject + static_cast<int>(reference.space()),
- "embedder field holder");
- embedder_fields_sink_.PutInt(reference.chunk_index(), "BackRefChunkIndex");
- embedder_fields_sink_.PutInt(reference.chunk_offset(),
- "BackRefChunkOffset");
+ EmbedderDataSlot(*js_obj, i)
+ .store_raw(isolate(), original_embedder_values[i], no_gc);
+ embedder_fields_sink_.Put(kNewObject, "embedder field holder");
+ embedder_fields_sink_.PutInt(reference->back_ref_index(), "BackRefIndex");
embedder_fields_sink_.PutInt(i, "embedder field index");
embedder_fields_sink_.PutInt(data.raw_size, "embedder fields data size");
embedder_fields_sink_.PutRaw(reinterpret_cast<const byte*>(data.data),
diff --git a/deps/v8/src/snapshot/context-serializer.h b/deps/v8/src/snapshot/context-serializer.h
index af8de77d80..03f195f3be 100644
--- a/deps/v8/src/snapshot/context-serializer.h
+++ b/deps/v8/src/snapshot/context-serializer.h
@@ -21,6 +21,8 @@ class V8_EXPORT_PRIVATE ContextSerializer : public Serializer {
v8::SerializeEmbedderFieldsCallback callback);
~ContextSerializer() override;
+ ContextSerializer(const ContextSerializer&) = delete;
+ ContextSerializer& operator=(const ContextSerializer&) = delete;
// Serialize the objects reachable from a single object pointer.
void Serialize(Context* o, const DisallowGarbageCollection& no_gc);
@@ -28,9 +30,9 @@ class V8_EXPORT_PRIVATE ContextSerializer : public Serializer {
bool can_be_rehashed() const { return can_be_rehashed_; }
private:
- void SerializeObject(HeapObject o) override;
+ void SerializeObjectImpl(Handle<HeapObject> o) override;
bool ShouldBeInTheStartupObjectCache(HeapObject o);
- bool SerializeJSObjectWithEmbedderFields(Object obj);
+ bool SerializeJSObjectWithEmbedderFields(Handle<HeapObject> obj);
void CheckRehashability(HeapObject obj);
StartupSerializer* startup_serializer_;
@@ -42,7 +44,6 @@ class V8_EXPORT_PRIVATE ContextSerializer : public Serializer {
// Used to store serialized data for embedder fields.
SnapshotByteSink embedder_fields_sink_;
- DISALLOW_COPY_AND_ASSIGN(ContextSerializer);
};
} // namespace internal
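Several classes in this patch (ScriptData, CodeSerializer, ContextSerializer, and more below) swap the DISALLOW_COPY_AND_ASSIGN macro for explicitly deleted copy operations. The idiom in isolation, on a hypothetical class:

class Example {
 public:
  Example() = default;
  // Equivalent to the old DISALLOW_COPY_AND_ASSIGN(Example), but visible in
  // the public interface and not dependent on a macro.
  Example(const Example&) = delete;
  Example& operator=(const Example&) = delete;
};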
diff --git a/deps/v8/src/snapshot/deserializer-allocator.cc b/deps/v8/src/snapshot/deserializer-allocator.cc
deleted file mode 100644
index 7ad49b0867..0000000000
--- a/deps/v8/src/snapshot/deserializer-allocator.cc
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/snapshot/deserializer-allocator.h"
-
-#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/heap/memory-chunk.h"
-#include "src/roots/roots.h"
-
-namespace v8 {
-namespace internal {
-
-void DeserializerAllocator::Initialize(Heap* heap) {
- heap_ = heap;
- roots_ = ReadOnlyRoots(heap);
-}
-
-// We know the space requirements before deserialization and can
-// pre-allocate that reserved space. During deserialization, all we need
-// to do is to bump up the pointer for each space in the reserved
-// space. This is also used for fixing back references.
-// We may have to split up the pre-allocation into several chunks
-// because it would not fit onto a single page. We do not have to keep
-// track of when to move to the next chunk. An opcode will signal this.
-// Since multiple large objects cannot be folded into one large object
-// space allocation, we have to do an actual allocation when deserializing
-// each large object. Instead of tracking offset for back references, we
-// reference large objects by index.
-Address DeserializerAllocator::AllocateRaw(SnapshotSpace space, int size) {
- const int space_number = static_cast<int>(space);
- if (space == SnapshotSpace::kLargeObject) {
- // Note that we currently do not support deserialization of large code
- // objects.
- HeapObject obj;
- AlwaysAllocateScope scope(heap_);
- OldLargeObjectSpace* lo_space = heap_->lo_space();
- AllocationResult result = lo_space->AllocateRaw(size);
- obj = result.ToObjectChecked();
- deserialized_large_objects_.push_back(obj);
- return obj.address();
- } else if (space == SnapshotSpace::kMap) {
- DCHECK_EQ(Map::kSize, size);
- return allocated_maps_[next_map_index_++];
- } else {
- DCHECK(IsPreAllocatedSpace(space));
- Address address = high_water_[space_number];
- DCHECK_NE(address, kNullAddress);
- high_water_[space_number] += size;
-#ifdef DEBUG
- // Assert that the current reserved chunk is still big enough.
- const Heap::Reservation& reservation = reservations_[space_number];
- int chunk_index = current_chunk_[space_number];
- DCHECK_LE(high_water_[space_number], reservation[chunk_index].end);
-#endif
-#ifndef V8_ENABLE_THIRD_PARTY_HEAP
- if (space == SnapshotSpace::kCode)
- MemoryChunk::FromAddress(address)
- ->GetCodeObjectRegistry()
- ->RegisterNewlyAllocatedCodeObject(address);
-#endif
- return address;
- }
-}
-
-Address DeserializerAllocator::Allocate(SnapshotSpace space, int size) {
-#ifdef DEBUG
- if (previous_allocation_start_ != kNullAddress) {
- // Make sure that the previous allocation is initialized sufficiently to
- // be iterated over by the GC.
- Address object_address = previous_allocation_start_;
- Address previous_allocation_end =
- previous_allocation_start_ + previous_allocation_size_;
- while (object_address != previous_allocation_end) {
- int object_size = HeapObject::FromAddress(object_address).Size();
- DCHECK_GT(object_size, 0);
- DCHECK_LE(object_address + object_size, previous_allocation_end);
- object_address += object_size;
- }
- }
-#endif
-
- Address address;
- HeapObject obj;
- // TODO(steveblackburn) Note that the third party heap allocates objects
- // at reservation time, which means alignment must be acted on at
- // reservation time, not here. Since the current encoding does not
- // inform the reservation of the alignment, it must be conservatively
- // aligned.
- //
- // A more general approach will be to avoid reservation altogether, and
- // instead of chunk index/offset encoding, simply encode backreferences
- // by index (this can be optimized by applying something like register
- // allocation to keep the metadata needed to record the in-flight
- // backreferences minimal). This has the significant advantage of
- // abstracting away the details of the memory allocator from this code.
- // At each allocation, the regular allocator performs allocation,
- // and a fixed-sized table is used to track and fix all back references.
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- address = AllocateRaw(space, size);
- } else if (next_alignment_ != kWordAligned) {
- const int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
- address = AllocateRaw(space, reserved);
- obj = HeapObject::FromAddress(address);
- // If one of the following assertions fails, then we are deserializing an
- // aligned object when the filler maps have not been deserialized yet.
- // We require filler maps as padding to align the object.
- DCHECK(roots_.free_space_map().IsMap());
- DCHECK(roots_.one_pointer_filler_map().IsMap());
- DCHECK(roots_.two_pointer_filler_map().IsMap());
- obj = Heap::AlignWithFiller(roots_, obj, size, reserved, next_alignment_);
- address = obj.address();
- next_alignment_ = kWordAligned;
- } else {
- address = AllocateRaw(space, size);
- }
-
-#ifdef DEBUG
- previous_allocation_start_ = address;
- previous_allocation_size_ = size;
-#endif
-
- return address;
-}
-
-void DeserializerAllocator::MoveToNextChunk(SnapshotSpace space) {
- DCHECK(IsPreAllocatedSpace(space));
- const int space_number = static_cast<int>(space);
- uint32_t chunk_index = current_chunk_[space_number];
- const Heap::Reservation& reservation = reservations_[space_number];
- // Make sure the current chunk is indeed exhausted.
- CHECK_EQ(reservation[chunk_index].end, high_water_[space_number]);
- // Move to next reserved chunk.
- chunk_index = ++current_chunk_[space_number];
- CHECK_LT(chunk_index, reservation.size());
- high_water_[space_number] = reservation[chunk_index].start;
-}
-
-HeapObject DeserializerAllocator::GetMap(uint32_t index) {
- DCHECK_LT(index, next_map_index_);
- return HeapObject::FromAddress(allocated_maps_[index]);
-}
-
-HeapObject DeserializerAllocator::GetLargeObject(uint32_t index) {
- DCHECK_LT(index, deserialized_large_objects_.size());
- return deserialized_large_objects_[index];
-}
-
-HeapObject DeserializerAllocator::GetObject(SnapshotSpace space,
- uint32_t chunk_index,
- uint32_t chunk_offset) {
- DCHECK(IsPreAllocatedSpace(space));
- const int space_number = static_cast<int>(space);
- DCHECK_LE(chunk_index, current_chunk_[space_number]);
- Address address =
- reservations_[space_number][chunk_index].start + chunk_offset;
- if (next_alignment_ != kWordAligned) {
- int padding = Heap::GetFillToAlign(address, next_alignment_);
- next_alignment_ = kWordAligned;
- DCHECK(padding == 0 ||
- HeapObject::FromAddress(address).IsFreeSpaceOrFiller());
- address += padding;
- }
- return HeapObject::FromAddress(address);
-}
-
-void DeserializerAllocator::DecodeReservation(
- const std::vector<SerializedData::Reservation>& res) {
- DCHECK_EQ(0, reservations_[0].size());
- int current_space = 0;
- for (auto& r : res) {
- reservations_[current_space].push_back(
- {r.chunk_size(), kNullAddress, kNullAddress});
- if (r.is_last()) current_space++;
- }
- DCHECK_EQ(kNumberOfSpaces, current_space);
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
-}
-
-bool DeserializerAllocator::ReserveSpace() {
-#ifdef DEBUG
- for (int i = 0; i < kNumberOfSpaces; ++i) {
- DCHECK_GT(reservations_[i].size(), 0);
- }
-#endif // DEBUG
- DCHECK(allocated_maps_.empty());
- // TODO(v8:7464): Allocate using the off-heap ReadOnlySpace here once
- // implemented.
- if (!heap_->ReserveSpace(reservations_, &allocated_maps_)) {
- return false;
- }
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- high_water_[i] = reservations_[i][0].start;
- }
- return true;
-}
-
-bool DeserializerAllocator::ReservationsAreFullyUsed() const {
- for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
- const uint32_t chunk_index = current_chunk_[space];
- if (reservations_[space].size() != chunk_index + 1) {
- return false;
- }
- if (reservations_[space][chunk_index].end != high_water_[space]) {
- return false;
- }
- }
- return (allocated_maps_.size() == next_map_index_);
-}
-
-void DeserializerAllocator::RegisterDeserializedObjectsForBlackAllocation() {
- heap_->RegisterDeserializedObjectsForBlackAllocation(
- reservations_, deserialized_large_objects_, allocated_maps_);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/snapshot/deserializer-allocator.h b/deps/v8/src/snapshot/deserializer-allocator.h
deleted file mode 100644
index 403e386fda..0000000000
--- a/deps/v8/src/snapshot/deserializer-allocator.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SNAPSHOT_DESERIALIZER_ALLOCATOR_H_
-#define V8_SNAPSHOT_DESERIALIZER_ALLOCATOR_H_
-
-#include "src/common/globals.h"
-#include "src/heap/heap.h"
-#include "src/objects/heap-object.h"
-#include "src/roots/roots.h"
-#include "src/snapshot/references.h"
-#include "src/snapshot/snapshot-data.h"
-
-namespace v8 {
-namespace internal {
-
-class Deserializer;
-class StartupDeserializer;
-
-class DeserializerAllocator final {
- public:
- DeserializerAllocator() = default;
-
- void Initialize(Heap* heap);
-
- // ------- Allocation Methods -------
- // Methods related to memory allocation during deserialization.
-
- Address Allocate(SnapshotSpace space, int size);
-
- void MoveToNextChunk(SnapshotSpace space);
- void SetAlignment(AllocationAlignment alignment) {
- DCHECK_EQ(kWordAligned, next_alignment_);
- DCHECK_LE(kWordAligned, alignment);
- DCHECK_LE(alignment, kDoubleUnaligned);
- next_alignment_ = static_cast<AllocationAlignment>(alignment);
- }
-
- HeapObject GetMap(uint32_t index);
- HeapObject GetLargeObject(uint32_t index);
- HeapObject GetObject(SnapshotSpace space, uint32_t chunk_index,
- uint32_t chunk_offset);
-
- // ------- Reservation Methods -------
- // Methods related to memory reservations (prior to deserialization).
-
- V8_EXPORT_PRIVATE void DecodeReservation(
- const std::vector<SerializedData::Reservation>& res);
- bool ReserveSpace();
-
- bool ReservationsAreFullyUsed() const;
-
- // ------- Misc Utility Methods -------
-
- void RegisterDeserializedObjectsForBlackAllocation();
-
- private:
- // Raw allocation without considering alignment.
- Address AllocateRaw(SnapshotSpace space, int size);
-
- private:
- static constexpr int kNumberOfPreallocatedSpaces =
- static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
- static constexpr int kNumberOfSpaces =
- static_cast<int>(SnapshotSpace::kNumberOfSpaces);
-
- // The address of the next object that will be allocated in each space.
- // Each space has a number of chunks reserved by the GC, with each chunk
- // fitting into a page. Deserialized objects are allocated into the
- // current chunk of the target space by bumping up high water mark.
- Heap::Reservation reservations_[kNumberOfSpaces];
- uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
- Address high_water_[kNumberOfPreallocatedSpaces];
-
-#ifdef DEBUG
- // Record the previous object allocated for DCHECKs.
- Address previous_allocation_start_ = kNullAddress;
- int previous_allocation_size_ = 0;
-#endif
-
- // The alignment of the next allocation.
- AllocationAlignment next_alignment_ = kWordAligned;
-
- // All required maps are pre-allocated during reservation. {next_map_index_}
- // stores the index of the next map to return from allocation.
- uint32_t next_map_index_ = 0;
- std::vector<Address> allocated_maps_;
-
- // Allocated large objects are kept in this map and may be fetched later as
- // back-references.
- std::vector<HeapObject> deserialized_large_objects_;
-
- // ReadOnlyRoots and heap are null until Initialize is called.
- Heap* heap_ = nullptr;
- ReadOnlyRoots roots_ = ReadOnlyRoots(static_cast<Address*>(nullptr));
-
- DISALLOW_COPY_AND_ASSIGN(DeserializerAllocator);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SNAPSHOT_DESERIALIZER_ALLOCATOR_H_
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 132af570b3..5a729b35d3 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -6,63 +6,221 @@
#include "src/base/logging.h"
#include "src/codegen/assembler-inl.h"
+#include "src/common/assert-scope.h"
#include "src/common/external-pointer.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/heap/heap-write-barrier.h"
#include "src/heap/read-only-heap.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/log.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/objects-body-descriptors-inl.h"
+#include "src/objects/objects.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/string.h"
#include "src/roots/roots.h"
#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/references.h"
#include "src/snapshot/serializer-deserializer.h"
+#include "src/snapshot/snapshot-data.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
+#include "src/utils/memcopy.h"
namespace v8 {
namespace internal {
-template <typename TSlot>
-TSlot Deserializer::Write(TSlot dest, MaybeObject value) {
- DCHECK(!next_reference_is_weak_);
- dest.store(value);
- return dest + 1;
-}
+// A SlotAccessor for a slot in a HeapObject, which abstracts the slot
+// operations done by the deserializer in a way which is GC-safe. In particular,
+// rather than an absolute slot address, this accessor holds a Handle to the
+// HeapObject, which is updated if the HeapObject moves.
+class SlotAccessorForHeapObject {
+ public:
+ static SlotAccessorForHeapObject ForSlotIndex(Handle<HeapObject> object,
+ int index) {
+ return SlotAccessorForHeapObject(object, index * kTaggedSize);
+ }
+ static SlotAccessorForHeapObject ForSlotOffset(Handle<HeapObject> object,
+ int offset) {
+ return SlotAccessorForHeapObject(object, offset);
+ }
+
+ MaybeObjectSlot slot() const { return object_->RawMaybeWeakField(offset_); }
+ Handle<HeapObject> object() const { return object_; }
+ int offset() const { return offset_; }
+
+ // Writes the given value to this slot, optionally with an offset (e.g. for
+ // repeat writes). Returns the number of slots written (which is one).
+ int Write(MaybeObject value, int slot_offset = 0) {
+ MaybeObjectSlot current_slot = slot() + slot_offset;
+ current_slot.Relaxed_Store(value);
+ WriteBarrier::Marking(*object_, current_slot, value);
+ // No need for a generational write barrier.
+ DCHECK(!Heap::InYoungGeneration(value));
+ return 1;
+ }
+ int Write(HeapObject value, HeapObjectReferenceType ref_type,
+ int slot_offset = 0) {
+ return Write(HeapObjectReference::From(value, ref_type), slot_offset);
+ }
+ int Write(Handle<HeapObject> value, HeapObjectReferenceType ref_type,
+ int slot_offset = 0) {
+ return Write(*value, ref_type, slot_offset);
+ }
+
+ // Same as Write, but additionally with a generational barrier.
+ int WriteWithGenerationalBarrier(MaybeObject value) {
+ MaybeObjectSlot current_slot = slot();
+ current_slot.Relaxed_Store(value);
+ WriteBarrier::Marking(*object_, current_slot, value);
+ if (Heap::InYoungGeneration(value)) {
+ GenerationalBarrier(*object_, current_slot, value);
+ }
+ return 1;
+ }
+ int WriteWithGenerationalBarrier(HeapObject value,
+ HeapObjectReferenceType ref_type) {
+ return WriteWithGenerationalBarrier(
+ HeapObjectReference::From(value, ref_type));
+ }
+ int WriteWithGenerationalBarrier(Handle<HeapObject> value,
+ HeapObjectReferenceType ref_type) {
+ return WriteWithGenerationalBarrier(*value, ref_type);
+ }
+
+ private:
+ SlotAccessorForHeapObject(Handle<HeapObject> object, int offset)
+ : object_(object), offset_(offset) {}
+
+ const Handle<HeapObject> object_;
+ const int offset_;
+};
+
+// A SlotAccessor for absolute full slot addresses.
+class SlotAccessorForRootSlots {
+ public:
+ explicit SlotAccessorForRootSlots(FullMaybeObjectSlot slot) : slot_(slot) {}
+
+ FullMaybeObjectSlot slot() const { return slot_; }
+ Handle<HeapObject> object() const { UNREACHABLE(); }
+ int offset() const { UNREACHABLE(); }
+
+ // Writes the given value to this slot, optionally with an offset (e.g. for
+ // repeat writes). Returns the number of slots written (which is one).
+ int Write(MaybeObject value, int slot_offset = 0) {
+ FullMaybeObjectSlot current_slot = slot() + slot_offset;
+ current_slot.Relaxed_Store(value);
+ return 1;
+ }
+ int Write(HeapObject value, HeapObjectReferenceType ref_type,
+ int slot_offset = 0) {
+ return Write(HeapObjectReference::From(value, ref_type), slot_offset);
+ }
+ int Write(Handle<HeapObject> value, HeapObjectReferenceType ref_type,
+ int slot_offset = 0) {
+ return Write(*value, ref_type, slot_offset);
+ }
+
+ int WriteWithGenerationalBarrier(MaybeObject value) { return Write(value); }
+ int WriteWithGenerationalBarrier(HeapObject value,
+ HeapObjectReferenceType ref_type) {
+ return WriteWithGenerationalBarrier(
+ HeapObjectReference::From(value, ref_type));
+ }
+ int WriteWithGenerationalBarrier(Handle<HeapObject> value,
+ HeapObjectReferenceType ref_type) {
+ return WriteWithGenerationalBarrier(*value, ref_type);
+ }
+
+ private:
+ const FullMaybeObjectSlot slot_;
+};
+
+// A SlotAccessor for creating a Handle, which saves a Handle allocation when
+// a Handle already exists.
+class SlotAccessorForHandle {
+ public:
+ SlotAccessorForHandle(Handle<HeapObject>* handle, Isolate* isolate)
+ : handle_(handle), isolate_(isolate) {}
+
+ MaybeObjectSlot slot() const { UNREACHABLE(); }
+ Handle<HeapObject> object() const { UNREACHABLE(); }
+ int offset() const { UNREACHABLE(); }
+
+ int Write(MaybeObject value, int slot_offset = 0) { UNREACHABLE(); }
+ int Write(HeapObject value, HeapObjectReferenceType ref_type,
+ int slot_offset = 0) {
+ DCHECK_EQ(slot_offset, 0);
+ DCHECK_EQ(ref_type, HeapObjectReferenceType::STRONG);
+ *handle_ = handle(value, isolate_);
+ return 1;
+ }
+ int Write(Handle<HeapObject> value, HeapObjectReferenceType ref_type,
+ int slot_offset = 0) {
+ DCHECK_EQ(slot_offset, 0);
+ DCHECK_EQ(ref_type, HeapObjectReferenceType::STRONG);
+ *handle_ = value;
+ return 1;
+ }
+
+ int WriteWithGenerationalBarrier(HeapObject value,
+ HeapObjectReferenceType ref_type) {
+ return Write(value, ref_type);
+ }
+ int WriteWithGenerationalBarrier(Handle<HeapObject> value,
+ HeapObjectReferenceType ref_type) {
+ return Write(value, ref_type);
+ }
+
+ private:
+ Handle<HeapObject>* handle_;
+ Isolate* isolate_;
+};
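[Editor's note] The three accessor classes above expose the same duck-typed surface (slot(), object(), offset(), Write(), WriteWithGenerationalBarrier()), which is what lets the bytecode-reading code further down be templated over the accessor type. A minimal standalone sketch of that pattern, using hypothetical stand-in types rather than V8's real ones:

#include <cassert>
#include <cstdint>
#include <vector>

using Tagged = std::uintptr_t;  // stand-in for a tagged value, not a V8 type

// Accessor for a slot inside a movable object, addressed by index so the
// backing storage may relocate between writes.
struct ObjectSlotAccessor {
  std::vector<Tagged>* fields;
  int index;
  int Write(Tagged value, int slot_offset = 0) {
    (*fields)[index + slot_offset] = value;  // real code also emits a marking barrier
    return 1;                                // number of slots written
  }
};

// Accessor for an absolute root slot; no barrier or relocation to worry about.
struct RootSlotAccessor {
  Tagged* slot;
  int Write(Tagged value, int slot_offset = 0) {
    slot[slot_offset] = value;
    return 1;
  }
};

// One templated bytecode handler serves both accessor kinds.
template <typename SlotAccessor>
int ReadSingleBytecode(uint8_t data, SlotAccessor slot_accessor) {
  if (data == 0) return 0;                   // e.g. a kNop-style bytecode
  return slot_accessor.Write(Tagged{data});  // e.g. a reference bytecode
}

int main() {
  std::vector<Tagged> object(4, 0);
  Tagged root = 0;
  assert(ReadSingleBytecode(7, ObjectSlotAccessor{&object, 1}) == 1);
  assert(ReadSingleBytecode(9, RootSlotAccessor{&root}) == 1);
  assert(object[1] == 7 && root == 9);
}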
template <typename TSlot>
-TSlot Deserializer::WriteAddress(TSlot dest, Address value) {
+int Deserializer::WriteAddress(TSlot dest, Address value) {
DCHECK(!next_reference_is_weak_);
memcpy(dest.ToVoidPtr(), &value, kSystemPointerSize);
STATIC_ASSERT(IsAligned(kSystemPointerSize, TSlot::kSlotDataSize));
- return dest + (kSystemPointerSize / TSlot::kSlotDataSize);
+ return (kSystemPointerSize / TSlot::kSlotDataSize);
}
template <typename TSlot>
-TSlot Deserializer::WriteExternalPointer(TSlot dest, Address value) {
- value = EncodeExternalPointer(isolate(), value);
+int Deserializer::WriteExternalPointer(TSlot dest, Address value,
+ ExternalPointerTag tag) {
DCHECK(!next_reference_is_weak_);
- memcpy(dest.ToVoidPtr(), &value, kExternalPointerSize);
+ InitExternalPointerField(dest.address(), isolate(), value, tag);
STATIC_ASSERT(IsAligned(kExternalPointerSize, TSlot::kSlotDataSize));
- return dest + (kExternalPointerSize / TSlot::kSlotDataSize);
+ return (kExternalPointerSize / TSlot::kSlotDataSize);
}
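[Editor's note] With the heap sandbox enabled, WriteExternalPointer no longer stores the raw external address in the object field; the field is initialized through an indirection. A rough standalone model of that idea, purely illustrative and not V8's actual external-pointer API:

#include <cassert>
#include <cstdint>
#include <vector>

using Address = std::uintptr_t;

// Hypothetical per-isolate table: the object field holds an index into the
// table instead of the raw external address.
struct ExternalPointerTable {
  std::vector<Address> entries;
  uint32_t Add(Address value) {
    entries.push_back(value);
    return static_cast<uint32_t>(entries.size() - 1);
  }
  Address Get(uint32_t index) const { return entries[index]; }
};

int main() {
  ExternalPointerTable table;
  Address external = 0x1234;                   // some off-heap address
  uint32_t field_value = table.Add(external);  // what gets written to the slot
  assert(table.Get(field_value) == external);  // reads go through the table
}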
-void Deserializer::Initialize(Isolate* isolate) {
- DCHECK_NULL(isolate_);
+Deserializer::Deserializer(Isolate* isolate, Vector<const byte> payload,
+ uint32_t magic_number, bool deserializing_user_code,
+ bool can_rehash)
+ : isolate_(isolate),
+ source_(payload),
+ magic_number_(magic_number),
+ deserializing_user_code_(deserializing_user_code),
+ can_rehash_(can_rehash) {
DCHECK_NOT_NULL(isolate);
- isolate_ = isolate;
- allocator()->Initialize(isolate->heap());
+ isolate_->RegisterDeserializerStarted();
+
+ // We start the indices here at 1, so that we can distinguish between an
+ // actual index and a nullptr (serialized as kNullRefSentinel) in a
+ // deserialized object requiring fix-up.
+ STATIC_ASSERT(kNullRefSentinel == 0);
+ backing_stores_.push_back({});
#ifdef DEBUG
num_api_references_ = 0;
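[Editor's note] The reserved zeroth entry pushed by the constructor gives the sentinel convention described in the comment above: a serialized reference equal to kNullRefSentinel (0) means "no backing store", and real stores start at index 1. A small illustrative sketch with stand-in types (not V8's):

#include <cassert>
#include <cstdint>
#include <memory>
#include <vector>

constexpr uint32_t kNullRefSentinel = 0;  // mirrors the convention above

struct BackingStore { int dummy = 0; };

int main() {
  std::vector<std::shared_ptr<BackingStore>> backing_stores;
  backing_stores.push_back({});  // slot 0 stays null: the sentinel

  // A real backing store deserialized later gets index >= 1.
  backing_stores.push_back(std::make_shared<BackingStore>());
  uint32_t ref = 1;

  assert(ref != kNullRefSentinel);
  assert(backing_stores[kNullRefSentinel] == nullptr);
  assert(backing_stores[ref] != nullptr);
}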
@@ -83,8 +241,8 @@ void Deserializer::Initialize(Isolate* isolate) {
void Deserializer::Rehash() {
DCHECK(can_rehash() || deserializing_user_code());
- for (HeapObject item : to_rehash_) {
- item.RehashBasedOnMap(isolate());
+ for (Handle<HeapObject> item : to_rehash_) {
+ item->RehashBasedOnMap(isolate());
}
}
@@ -94,16 +252,18 @@ Deserializer::~Deserializer() {
if (source_.position() == 0) return;
// Check that we only have padding bytes remaining.
while (source_.HasMore()) DCHECK_EQ(kNop, source_.Get());
- // Check that we've fully used all reserved space.
- DCHECK(allocator()->ReservationsAreFullyUsed());
+ // Check that there are no remaining forward refs.
+ DCHECK_EQ(num_unresolved_forward_refs_, 0);
+ DCHECK(unresolved_forward_refs_.empty());
#endif // DEBUG
+ isolate_->RegisterDeserializerFinished();
}
// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
void Deserializer::VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) {
- ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end), kNullAddress);
+ ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end));
}
void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
@@ -112,8 +272,6 @@ void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
}
void Deserializer::DeserializeDeferredObjects() {
- DisallowGarbageCollection no_gc;
-
for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
SnapshotSpace space = NewObject::Decode(code);
ReadObject(space);
@@ -122,10 +280,20 @@ void Deserializer::DeserializeDeferredObjects() {
void Deserializer::LogNewMapEvents() {
DisallowGarbageCollection no_gc;
- for (Map map : new_maps_) {
+ for (Handle<Map> map : new_maps_) {
DCHECK(FLAG_trace_maps);
- LOG(isolate(), MapCreate(map));
- LOG(isolate(), MapDetails(map));
+ LOG(isolate(), MapCreate(*map));
+ LOG(isolate(), MapDetails(*map));
+ }
+}
+
+void Deserializer::WeakenDescriptorArrays() {
+ DisallowHeapAllocation no_gc;
+ for (Handle<DescriptorArray> descriptor_array : new_descriptor_arrays_) {
+ DCHECK(descriptor_array->IsStrongDescriptorArray());
+ descriptor_array->set_map(ReadOnlyRoots(isolate()).descriptor_array_map());
+ WriteBarrier::Marking(*descriptor_array,
+ descriptor_array->number_of_descriptors());
}
}
@@ -157,141 +325,149 @@ uint32_t StringTableInsertionKey::ComputeHashField(String string) {
return string.hash_field();
}
-HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
- SnapshotSpace space) {
+void Deserializer::PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
+ SnapshotSpace space) {
+ DCHECK_EQ(*map, obj->map());
DisallowGarbageCollection no_gc;
+ InstanceType instance_type = map->instance_type();
if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
- if (obj.IsString()) {
+ if (InstanceTypeChecker::IsString(instance_type)) {
// Uninitialize hash field as we need to recompute the hash.
- String string = String::cast(obj);
- string.set_hash_field(String::kEmptyHashField);
+ Handle<String> string = Handle<String>::cast(obj);
+ string->set_hash_field(String::kEmptyHashField);
// Rehash strings before read-only space is sealed. Strings outside
// read-only space are rehashed lazily. (e.g. when rehashing dictionaries)
if (space == SnapshotSpace::kReadOnlyHeap) {
to_rehash_.push_back(obj);
}
- } else if (obj.NeedsRehashing()) {
+ } else if (obj->NeedsRehashing(instance_type)) {
to_rehash_.push_back(obj);
}
}
if (deserializing_user_code()) {
- if (obj.IsString()) {
- String string = String::cast(obj);
- if (string.IsInternalizedString()) {
- // Canonicalize the internalized string. If it already exists in the
- // string table, set it to forward to the existing one.
-
- // Create storage for a fake handle -- this only needs to be valid until
- // the end of LookupKey.
- Address handle_storage = string.ptr();
- Handle<String> handle(&handle_storage);
- StringTableInsertionKey key(handle);
- String result = *isolate()->string_table()->LookupKey(isolate(), &key);
-
- if (FLAG_thin_strings && result != string) {
- string.MakeThin(isolate(), result);
- }
- return result;
+ if (InstanceTypeChecker::IsInternalizedString(instance_type)) {
+ // Canonicalize the internalized string. If it already exists in the
+ // string table, set it to forward to the existing one.
+ Handle<String> string = Handle<String>::cast(obj);
+
+ StringTableInsertionKey key(string);
+ Handle<String> result =
+ isolate()->string_table()->LookupKey(isolate(), &key);
+
+ if (FLAG_thin_strings && *result != *string) {
+ string->MakeThin(isolate(), *result);
+ // Mutate the given object handle so that the backreference entry is
+ // also updated.
+ obj.PatchValue(*result);
}
- } else if (obj.IsScript()) {
- new_scripts_.push_back(handle(Script::cast(obj), isolate()));
- } else if (obj.IsAllocationSite()) {
+ return;
+ } else if (InstanceTypeChecker::IsScript(instance_type)) {
+ new_scripts_.push_back(Handle<Script>::cast(obj));
+ } else if (InstanceTypeChecker::IsAllocationSite(instance_type)) {
// We should link new allocation sites, but we can't do this immediately
// because |AllocationSite::HasWeakNext()| internally accesses
// |Heap::roots_| that may not have been initialized yet. So defer this to
// |ObjectDeserializer::CommitPostProcessedObjects()|.
- new_allocation_sites_.push_back(AllocationSite::cast(obj));
+ new_allocation_sites_.push_back(Handle<AllocationSite>::cast(obj));
} else {
- DCHECK(CanBeDeferred(obj));
+ DCHECK(CanBeDeferred(*obj));
}
}
- if (obj.IsScript()) {
- LogScriptEvents(Script::cast(obj));
- } else if (obj.IsCode()) {
+
+ if (InstanceTypeChecker::IsScript(instance_type)) {
+ LogScriptEvents(Script::cast(*obj));
+ } else if (InstanceTypeChecker::IsCode(instance_type)) {
// We flush all code pages after deserializing the startup snapshot.
// Hence we only remember each individual code object when deserializing
// user code.
- if (deserializing_user_code() || space == SnapshotSpace::kLargeObject) {
- new_code_objects_.push_back(Code::cast(obj));
- }
- } else if (FLAG_trace_maps && obj.IsMap()) {
- // Keep track of all seen Maps to log them later since they might be only
- // partially initialized at this point.
- new_maps_.push_back(Map::cast(obj));
- } else if (obj.IsAccessorInfo()) {
+ if (deserializing_user_code()) {
+ new_code_objects_.push_back(Handle<Code>::cast(obj));
+ }
+ } else if (InstanceTypeChecker::IsMap(instance_type)) {
+ if (FLAG_trace_maps) {
+ // Keep track of all seen Maps to log them later since they might be only
+ // partially initialized at this point.
+ new_maps_.push_back(Handle<Map>::cast(obj));
+ }
+ } else if (InstanceTypeChecker::IsAccessorInfo(instance_type)) {
#ifdef USE_SIMULATOR
- accessor_infos_.push_back(AccessorInfo::cast(obj));
+ accessor_infos_.push_back(Handle<AccessorInfo>::cast(obj));
#endif
- } else if (obj.IsCallHandlerInfo()) {
+ } else if (InstanceTypeChecker::IsCallHandlerInfo(instance_type)) {
#ifdef USE_SIMULATOR
- call_handler_infos_.push_back(CallHandlerInfo::cast(obj));
+ call_handler_infos_.push_back(Handle<CallHandlerInfo>::cast(obj));
#endif
- } else if (obj.IsExternalString()) {
- ExternalString string = ExternalString::cast(obj);
- uint32_t index = string.resource_as_uint32();
+ } else if (InstanceTypeChecker::IsExternalString(instance_type)) {
+ Handle<ExternalString> string = Handle<ExternalString>::cast(obj);
+ uint32_t index = string->GetResourceRefForDeserialization();
Address address =
static_cast<Address>(isolate()->api_external_references()[index]);
- string.set_address_as_resource(isolate(), address);
- isolate()->heap()->UpdateExternalString(string, 0,
- string.ExternalPayloadSize());
- isolate()->heap()->RegisterExternalString(String::cast(obj));
- } else if (obj.IsJSDataView()) {
- JSDataView data_view = JSDataView::cast(obj);
- JSArrayBuffer buffer = JSArrayBuffer::cast(data_view.buffer());
+ string->AllocateExternalPointerEntries(isolate());
+ string->set_address_as_resource(isolate(), address);
+ isolate()->heap()->UpdateExternalString(*string, 0,
+ string->ExternalPayloadSize());
+ isolate()->heap()->RegisterExternalString(*string);
+ } else if (InstanceTypeChecker::IsJSDataView(instance_type)) {
+ Handle<JSDataView> data_view = Handle<JSDataView>::cast(obj);
+ JSArrayBuffer buffer = JSArrayBuffer::cast(data_view->buffer());
void* backing_store = nullptr;
- if (buffer.backing_store() != nullptr) {
+ uint32_t store_index = buffer.GetBackingStoreRefForDeserialization();
+ if (store_index != kNullRefSentinel) {
// The backing store of the JSArrayBuffer has not been correctly restored
// yet, as that may trigger GC. The backing_store field currently contains
// a numbered reference to an already deserialized backing store.
- uint32_t store_index = buffer.GetBackingStoreRefForDeserialization();
backing_store = backing_stores_[store_index]->buffer_start();
}
- data_view.set_data_pointer(
+ data_view->AllocateExternalPointerEntries(isolate());
+ data_view->set_data_pointer(
isolate(),
- reinterpret_cast<uint8_t*>(backing_store) + data_view.byte_offset());
- } else if (obj.IsJSTypedArray()) {
- JSTypedArray typed_array = JSTypedArray::cast(obj);
+ reinterpret_cast<uint8_t*>(backing_store) + data_view->byte_offset());
+ } else if (InstanceTypeChecker::IsJSTypedArray(instance_type)) {
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(obj);
// Fixup typed array pointers.
- if (typed_array.is_on_heap()) {
- typed_array.SetOnHeapDataPtr(isolate(),
- HeapObject::cast(typed_array.base_pointer()),
- typed_array.external_pointer());
+ if (typed_array->is_on_heap()) {
+ Address raw_external_pointer = typed_array->external_pointer_raw();
+ typed_array->AllocateExternalPointerEntries(isolate());
+ typed_array->SetOnHeapDataPtr(
+ isolate(), HeapObject::cast(typed_array->base_pointer()),
+ raw_external_pointer);
} else {
// Serializer writes backing store ref as a DataPtr() value.
uint32_t store_index =
- typed_array.GetExternalBackingStoreRefForDeserialization();
+ typed_array->GetExternalBackingStoreRefForDeserialization();
auto backing_store = backing_stores_[store_index];
auto start = backing_store
? reinterpret_cast<byte*>(backing_store->buffer_start())
: nullptr;
- typed_array.SetOffHeapDataPtr(isolate(), start,
- typed_array.byte_offset());
+ typed_array->AllocateExternalPointerEntries(isolate());
+ typed_array->SetOffHeapDataPtr(isolate(), start,
+ typed_array->byte_offset());
}
- } else if (obj.IsJSArrayBuffer()) {
- JSArrayBuffer buffer = JSArrayBuffer::cast(obj);
+ } else if (InstanceTypeChecker::IsJSArrayBuffer(instance_type)) {
+ Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(obj);
// Postpone allocation of backing store to avoid triggering the GC.
- if (buffer.backing_store() != nullptr) {
- new_off_heap_array_buffers_.push_back(handle(buffer, isolate()));
+ if (buffer->GetBackingStoreRefForDeserialization() != kNullRefSentinel) {
+ new_off_heap_array_buffers_.push_back(buffer);
+ } else {
+ buffer->AllocateExternalPointerEntries(isolate());
+ buffer->set_backing_store(isolate(), nullptr);
}
- } else if (obj.IsBytecodeArray()) {
+ } else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
// TODO(mythria): Remove these once we store the default values for these
// fields in the serializer.
- BytecodeArray bytecode_array = BytecodeArray::cast(obj);
- bytecode_array.set_osr_loop_nesting_level(0);
- }
-#ifdef DEBUG
- if (obj.IsDescriptorArray()) {
- DescriptorArray descriptor_array = DescriptorArray::cast(obj);
- DCHECK_EQ(0, descriptor_array.raw_number_of_marked_descriptors());
+ Handle<BytecodeArray> bytecode_array = Handle<BytecodeArray>::cast(obj);
+ bytecode_array->set_osr_loop_nesting_level(0);
+ } else if (InstanceTypeChecker::IsDescriptorArray(instance_type)) {
+ DCHECK(InstanceTypeChecker::IsStrongDescriptorArray(instance_type));
+ Handle<DescriptorArray> descriptors = Handle<DescriptorArray>::cast(obj);
+ new_descriptor_arrays_.push_back(descriptors);
}
-#endif
// Check alignment.
- DCHECK_EQ(0, Heap::GetFillToAlign(obj.address(),
- HeapObject::RequiredAlignment(obj.map())));
- return obj;
+ DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
+ HeapObject::RequiredAlignment(*map)));
}
HeapObjectReferenceType Deserializer::GetAndResetNextReferenceType() {
@@ -302,162 +478,180 @@ HeapObjectReferenceType Deserializer::GetAndResetNextReferenceType() {
return type;
}
-HeapObject Deserializer::GetBackReferencedObject(SnapshotSpace space) {
- HeapObject obj;
- switch (space) {
- case SnapshotSpace::kLargeObject:
- obj = allocator()->GetLargeObject(source_.GetInt());
- break;
- case SnapshotSpace::kMap:
- obj = allocator()->GetMap(source_.GetInt());
- break;
- case SnapshotSpace::kReadOnlyHeap: {
- uint32_t chunk_index = source_.GetInt();
- uint32_t chunk_offset = source_.GetInt();
- if (isolate()->heap()->deserialization_complete()) {
- ReadOnlySpace* read_only_space = isolate()->heap()->read_only_space();
- ReadOnlyPage* page = read_only_space->pages()[chunk_index];
- Address address = page->OffsetToAddress(chunk_offset);
- obj = HeapObject::FromAddress(address);
- } else {
- obj = allocator()->GetObject(space, chunk_index, chunk_offset);
- }
- break;
- }
- default: {
- uint32_t chunk_index = source_.GetInt();
- uint32_t chunk_offset = source_.GetInt();
- obj = allocator()->GetObject(space, chunk_index, chunk_offset);
- break;
- }
- }
+Handle<HeapObject> Deserializer::GetBackReferencedObject() {
+ Handle<HeapObject> obj = back_refs_[source_.GetInt()];
- if (deserializing_user_code() && obj.IsThinString()) {
- obj = ThinString::cast(obj).actual();
- }
+ // We don't allow ThinStrings in backreferences -- if internalization produces
+ // a thin string, then it should also update the backref handle.
+ DCHECK(!obj->IsThinString());
hot_objects_.Add(obj);
- DCHECK(!HasWeakHeapObjectTag(obj));
+ DCHECK(!HasWeakHeapObjectTag(*obj));
return obj;
}
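[Editor's note] With the allocator gone, a back-reference is no longer a (space, chunk, offset) triple; it is simply an index into a vector that grows as objects are read. Roughly, with illustrative stand-in types:

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> back_refs;   // models back_refs_ (handles in V8)
  back_refs.push_back("first object");  // appended as each object is read
  back_refs.push_back("second object");

  int serialized_index = 1;  // what source_.GetInt() would yield
  assert(back_refs[serialized_index] == "second object");
}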
-HeapObject Deserializer::ReadObject() {
- MaybeObject object;
- ReadData(FullMaybeObjectSlot(&object), FullMaybeObjectSlot(&object + 1),
- kNullAddress);
- return object.GetHeapObjectAssumeStrong();
+Handle<HeapObject> Deserializer::ReadObject() {
+ Handle<HeapObject> ret;
+ CHECK_EQ(ReadSingleBytecodeData(source_.Get(),
+ SlotAccessorForHandle(&ret, isolate())),
+ 1);
+ return ret;
}
-HeapObject Deserializer::ReadObject(SnapshotSpace space) {
- DisallowGarbageCollection no_gc;
-
- const int size = source_.GetInt() << kObjectAlignmentBits;
+Handle<HeapObject> Deserializer::ReadObject(SnapshotSpace space) {
+ const int size_in_tagged = source_.GetInt();
+ const int size_in_bytes = size_in_tagged * kTaggedSize;
// The map can't be a forward ref. If you want the map to be a forward ref,
// then you're probably serializing the meta-map, in which case you want to
// use the kNewMetaMap bytecode.
DCHECK_NE(source()->Peek(), kRegisterPendingForwardRef);
- Map map = Map::cast(ReadObject());
-
- // The serializer allocated the object now, so the next bytecodes might be an
- // alignment prefix and/or a next chunk
- if (base::IsInRange<byte, byte>(source()->Peek(), kAlignmentPrefix,
- kAlignmentPrefix + 2)) {
- int alignment = source()->Get() - (kAlignmentPrefix - 1);
- allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
+ Handle<Map> map = Handle<Map>::cast(ReadObject());
+
+ // Filling an object's fields can cause GCs and heap walks, so this object has
+ // to be in a 'sufficiently initialised' state by the time the next allocation
+ // can happen. For this to be the case, the object is carefully deserialized
+ // as follows:
+ // * The space for the object is allocated.
+ // * The map is set on the object so that the GC knows what type the object
+ // has.
+ // * The rest of the object is filled with a fixed Smi value
+ // - This is a Smi so that tagged fields become initialized to a valid
+ // tagged value.
+ // - It's a fixed value, "uninitialized_field_value", so that we can
+ // DCHECK for it when reading objects that are assumed to be partially
+ // initialized objects.
+ // * The fields of the object are deserialized in order, under the
+ // assumption that objects are laid out in such a way that any fields
+ // required for object iteration (e.g. length fields) are deserialized
+ // before fields with objects.
+ // - We ensure this is the case by DCHECKing on object allocation that the
+ // previously allocated object has a valid size (see `Allocate`).
+ HeapObject raw_obj =
+ Allocate(space, size_in_bytes, HeapObject::RequiredAlignment(*map));
+ raw_obj.set_map_after_allocation(*map);
+ MemsetTagged(raw_obj.RawField(kTaggedSize), uninitialized_field_value(),
+ size_in_tagged - 1);
+
+ // Make sure BytecodeArrays have a valid age, so that the marker doesn't
+ // break when making them older.
+ if (raw_obj.IsBytecodeArray(isolate())) {
+ BytecodeArray::cast(raw_obj).set_bytecode_age(
+ BytecodeArray::kFirstBytecodeAge);
}
- if (source()->Peek() == kNextChunk) {
- source()->Advance(1);
- // The next byte is the space for the next chunk -- it should match the
- // current space.
- // TODO(leszeks): Remove the next chunk space entirely.
- DCHECK_EQ(static_cast<SnapshotSpace>(source()->Peek()), space);
- source()->Advance(1);
- allocator()->MoveToNextChunk(space);
+
+#ifdef DEBUG
+ // We want to make sure that all embedder pointers are initialized to null.
+ if (raw_obj.IsJSObject() && JSObject::cast(raw_obj).IsApiWrapper()) {
+ JSObject js_obj = JSObject::cast(raw_obj);
+ for (int i = 0; i < js_obj.GetEmbedderFieldCount(); ++i) {
+ void* pointer;
+ CHECK(EmbedderDataSlot(js_obj, i).ToAlignedPointerSafe(isolate(),
+ &pointer));
+ CHECK_NULL(pointer);
+ }
+ } else if (raw_obj.IsEmbedderDataArray()) {
+ EmbedderDataArray array = EmbedderDataArray::cast(raw_obj);
+ EmbedderDataSlot start(array, 0);
+ EmbedderDataSlot end(array, array.length());
+ for (EmbedderDataSlot slot = start; slot < end; ++slot) {
+ void* pointer;
+ CHECK(slot.ToAlignedPointerSafe(isolate(), &pointer));
+ CHECK_NULL(pointer);
+ }
}
+#endif
- Address address = allocator()->Allocate(space, size);
- HeapObject obj = HeapObject::FromAddress(address);
+ Handle<HeapObject> obj = handle(raw_obj, isolate());
+ back_refs_.push_back(obj);
- isolate()->heap()->OnAllocationEvent(obj, size);
- MaybeObjectSlot current(address);
- MaybeObjectSlot limit(address + size);
+ ReadData(obj, 1, size_in_tagged);
+ PostProcessNewObject(map, obj, space);
- current.store(MaybeObject::FromObject(map));
- ReadData(current + 1, limit, address);
- obj = PostProcessNewObject(obj, space);
+ DCHECK(!obj->IsThinString(isolate()));
#ifdef DEBUG
- if (obj.IsCode()) {
+ if (obj->IsCode()) {
DCHECK(space == SnapshotSpace::kCode ||
space == SnapshotSpace::kReadOnlyHeap);
} else {
DCHECK_NE(space, SnapshotSpace::kCode);
}
#endif // DEBUG
+
return obj;
}
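[Editor's note] The allocation order spelled out in the comment block above can be modelled in isolation: set the "map" first so the object's type and size are known, then pre-fill every remaining tagged field with a fixed Smi-like sentinel so a heap walk between allocation and field deserialization never sees a wild pointer. A standalone sketch under those assumptions (not V8 code):

#include <cassert>
#include <cstdint>
#include <vector>

using Tagged = std::uintptr_t;
constexpr Tagged kUninitializedFieldValue = 0;  // plays the role of Smi(kNullAddress)

struct FakeObject {
  std::vector<Tagged> fields;
  explicit FakeObject(Tagged map, int size_in_tagged)
      : fields(size_in_tagged, kUninitializedFieldValue) {
    fields[0] = map;  // the map is set first; everything else is the sentinel
  }
};

int main() {
  FakeObject obj(/*map=*/0x42, /*size_in_tagged=*/4);
  // Any walk of the object before its fields are deserialized only ever sees
  // the map or the sentinel, never garbage.
  assert(obj.fields[0] == 0x42);
  for (int i = 1; i < 4; ++i) assert(obj.fields[i] == kUninitializedFieldValue);
}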
-HeapObject Deserializer::ReadMetaMap() {
- DisallowHeapAllocation no_gc;
-
+Handle<HeapObject> Deserializer::ReadMetaMap() {
const SnapshotSpace space = SnapshotSpace::kReadOnlyHeap;
- const int size = Map::kSize;
+ const int size_in_bytes = Map::kSize;
+ const int size_in_tagged = size_in_bytes / kTaggedSize;
- Address address = allocator()->Allocate(space, size);
- HeapObject obj = HeapObject::FromAddress(address);
+ HeapObject raw_obj = Allocate(space, size_in_bytes, kWordAligned);
+ raw_obj.set_map_after_allocation(Map::unchecked_cast(raw_obj));
+ MemsetTagged(raw_obj.RawField(kTaggedSize), uninitialized_field_value(),
+ size_in_tagged - 1);
- isolate()->heap()->OnAllocationEvent(obj, size);
- MaybeObjectSlot current(address);
- MaybeObjectSlot limit(address + size);
+ Handle<HeapObject> obj = handle(raw_obj, isolate());
+ back_refs_.push_back(obj);
- current.store(MaybeObject(current.address() + kHeapObjectTag));
// Set the instance-type manually, to allow backrefs to read it.
- Map::unchecked_cast(obj).set_instance_type(MAP_TYPE);
- ReadData(current + 1, limit, address);
+ Map::unchecked_cast(*obj).set_instance_type(MAP_TYPE);
- return obj;
-}
+ ReadData(obj, 1, size_in_tagged);
+ PostProcessNewObject(Handle<Map>::cast(obj), obj, space);
-void Deserializer::ReadCodeObjectBody(Address code_object_address) {
- // At this point the code object is already allocated, its map field is
- // initialized and its raw data fields and code stream are also read.
- // Now we read the rest of code header's fields.
- MaybeObjectSlot current(code_object_address + HeapObject::kHeaderSize);
- MaybeObjectSlot limit(code_object_address + Code::kDataStart);
- ReadData(current, limit, code_object_address);
-
- // Now iterate RelocInfos the same way it was done by the serializer and

- // deserialize respective data into RelocInfos.
- Code code = Code::cast(HeapObject::FromAddress(code_object_address));
- RelocIterator it(code, Code::BodyDescriptor::kRelocModeMask);
- for (; !it.done(); it.next()) {
- RelocInfo rinfo = *it.rinfo();
- rinfo.Visit(this);
- }
+ return obj;
}
-void Deserializer::VisitCodeTarget(Code host, RelocInfo* rinfo) {
- HeapObject object = ReadObject();
+class Deserializer::RelocInfoVisitor {
+ public:
+ RelocInfoVisitor(Deserializer* deserializer,
+ const std::vector<Handle<HeapObject>>* objects)
+ : deserializer_(deserializer), objects_(objects), current_object_(0) {}
+ ~RelocInfoVisitor() { DCHECK_EQ(current_object_, objects_->size()); }
+
+ void VisitCodeTarget(Code host, RelocInfo* rinfo);
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo);
+ void VisitRuntimeEntry(Code host, RelocInfo* rinfo);
+ void VisitExternalReference(Code host, RelocInfo* rinfo);
+ void VisitInternalReference(Code host, RelocInfo* rinfo);
+ void VisitOffHeapTarget(Code host, RelocInfo* rinfo);
+
+ private:
+ Isolate* isolate() { return deserializer_->isolate(); }
+ SnapshotByteSource& source() { return deserializer_->source_; }
+
+ Deserializer* deserializer_;
+ const std::vector<Handle<HeapObject>>* objects_;
+ int current_object_;
+};
+
+void Deserializer::RelocInfoVisitor::VisitCodeTarget(Code host,
+ RelocInfo* rinfo) {
+ HeapObject object = *objects_->at(current_object_++);
rinfo->set_target_address(Code::cast(object).raw_instruction_start());
}
-void Deserializer::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
- HeapObject object = ReadObject();
+void Deserializer::RelocInfoVisitor::VisitEmbeddedPointer(Code host,
+ RelocInfo* rinfo) {
+ HeapObject object = *objects_->at(current_object_++);
// Embedded object reference must be a strong one.
rinfo->set_target_object(isolate()->heap(), object);
}
-void Deserializer::VisitRuntimeEntry(Code host, RelocInfo* rinfo) {
+void Deserializer::RelocInfoVisitor::VisitRuntimeEntry(Code host,
+ RelocInfo* rinfo) {
// We no longer serialize code that contains runtime entries.
UNREACHABLE();
}
-void Deserializer::VisitExternalReference(Code host, RelocInfo* rinfo) {
- byte data = source_.Get();
+void Deserializer::RelocInfoVisitor::VisitExternalReference(Code host,
+ RelocInfo* rinfo) {
+ byte data = source().Get();
CHECK_EQ(data, kExternalReference);
- Address address = ReadExternalReferenceCase();
+ Address address = deserializer_->ReadExternalReferenceCase();
if (rinfo->IsCodedSpecially()) {
Address location_of_branch_data = rinfo->pc();
@@ -468,24 +662,30 @@ void Deserializer::VisitExternalReference(Code host, RelocInfo* rinfo) {
}
}
-void Deserializer::VisitInternalReference(Code host, RelocInfo* rinfo) {
- byte data = source_.Get();
+void Deserializer::RelocInfoVisitor::VisitInternalReference(Code host,
+ RelocInfo* rinfo) {
+ byte data = source().Get();
CHECK_EQ(data, kInternalReference);
// Internal reference target is encoded as an offset from code entry.
- int target_offset = source_.GetInt();
+ int target_offset = source().GetInt();
+ // TODO(jgruber,v8:11036): We are being permissive for this DCHECK, but
+ // consider using raw_instruction_size() instead of raw_body_size() in the
+ // future.
+ STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
DCHECK_LT(static_cast<unsigned>(target_offset),
- static_cast<unsigned>(host.raw_instruction_size()));
+ static_cast<unsigned>(host.raw_body_size()));
Address target = host.entry() + target_offset;
Assembler::deserialization_set_target_internal_reference_at(
rinfo->pc(), target, rinfo->rmode());
}
-void Deserializer::VisitOffHeapTarget(Code host, RelocInfo* rinfo) {
- byte data = source_.Get();
+void Deserializer::RelocInfoVisitor::VisitOffHeapTarget(Code host,
+ RelocInfo* rinfo) {
+ byte data = source().Get();
CHECK_EQ(data, kOffHeapTarget);
- int builtin_index = source_.GetInt();
+ int builtin_index = source().GetInt();
DCHECK(Builtins::IsBuiltinId(builtin_index));
CHECK_NOT_NULL(isolate()->embedded_blob_code());
@@ -503,18 +703,18 @@ void Deserializer::VisitOffHeapTarget(Code host, RelocInfo* rinfo) {
}
}
-template <typename TSlot>
-TSlot Deserializer::ReadRepeatedObject(TSlot current, int repeat_count) {
+template <typename SlotAccessor>
+int Deserializer::ReadRepeatedObject(SlotAccessor slot_accessor,
+ int repeat_count) {
CHECK_LE(2, repeat_count);
- HeapObject heap_object = ReadObject();
- DCHECK(!Heap::InYoungGeneration(heap_object));
+ Handle<HeapObject> heap_object = ReadObject();
+ DCHECK(!Heap::InYoungGeneration(*heap_object));
for (int i = 0; i < repeat_count; i++) {
- // Repeated values are not subject to the write barrier so we don't need
- // to trigger it.
- current = Write(current, MaybeObject::FromObject(heap_object));
+ // TODO(leszeks): Use a ranged barrier here.
+ slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG, i);
}
- return current;
+ return repeat_count;
}
namespace {
@@ -551,26 +751,38 @@ constexpr byte VerifyBytecodeCount(byte bytecode) {
#define CASE_R32(byte_code) CASE_R16(byte_code) : case CASE_R16(byte_code + 16)
// This generates a case range for all the spaces.
-#define CASE_RANGE_ALL_SPACES(bytecode) \
- SpaceEncoder<bytecode>::Encode(SnapshotSpace::kOld) \
- : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kCode) \
- : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kMap) \
- : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kLargeObject) \
+#define CASE_RANGE_ALL_SPACES(bytecode) \
+ SpaceEncoder<bytecode>::Encode(SnapshotSpace::kOld) \
+ : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kCode) \
+ : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kMap) \
: case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kReadOnlyHeap)
-template <typename TSlot>
-void Deserializer::ReadData(TSlot current, TSlot limit,
- Address current_object_address) {
- while (current < limit) {
+void Deserializer::ReadData(Handle<HeapObject> object, int start_slot_index,
+ int end_slot_index) {
+ int current = start_slot_index;
+ while (current < end_slot_index) {
byte data = source_.Get();
- current = ReadSingleBytecodeData(data, current, current_object_address);
+ current += ReadSingleBytecodeData(
+ data, SlotAccessorForHeapObject::ForSlotIndex(object, current));
}
- CHECK_EQ(limit, current);
+ CHECK_EQ(current, end_slot_index);
}
-template <typename TSlot>
-TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
- Address current_object_address) {
+void Deserializer::ReadData(FullMaybeObjectSlot start,
+ FullMaybeObjectSlot end) {
+ FullMaybeObjectSlot current = start;
+ while (current < end) {
+ byte data = source_.Get();
+ current += ReadSingleBytecodeData(data, SlotAccessorForRootSlots(current));
+ }
+ CHECK_EQ(current, end);
+}
+
+template <typename SlotAccessor>
+int Deserializer::ReadSingleBytecodeData(byte data,
+ SlotAccessor slot_accessor) {
+ using TSlot = decltype(slot_accessor.slot());
+
switch (data) {
// Deserialize a new object and write a pointer to it to the current
// object.
@@ -578,19 +790,30 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
SnapshotSpace space = NewObject::Decode(data);
// Save the reference type before recursing down into reading the object.
HeapObjectReferenceType ref_type = GetAndResetNextReferenceType();
- HeapObject heap_object = ReadObject(space);
- DCHECK(!Heap::InYoungGeneration(heap_object));
- return Write(current, HeapObjectReference::From(heap_object, ref_type));
+ Handle<HeapObject> heap_object = ReadObject(space);
+ return slot_accessor.Write(heap_object, ref_type);
}
// Find a recently deserialized object using its offset from the current
// allocation point and write a pointer to it to the current object.
- case CASE_RANGE_ALL_SPACES(kBackref): {
- SnapshotSpace space = BackRef::Decode(data);
- HeapObject heap_object = GetBackReferencedObject(space);
- DCHECK(!Heap::InYoungGeneration(heap_object));
- return Write(current, HeapObjectReference::From(
- heap_object, GetAndResetNextReferenceType()));
+ case kBackref: {
+ Handle<HeapObject> heap_object = GetBackReferencedObject();
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
+ }
+
+ // Reference an object in the read-only heap. This should be used when an
+ // object is read-only, but is not a root.
+ case kReadOnlyHeapRef: {
+ DCHECK(isolate()->heap()->deserialization_complete());
+ uint32_t chunk_index = source_.GetInt();
+ uint32_t chunk_offset = source_.GetInt();
+
+ ReadOnlySpace* read_only_space = isolate()->heap()->read_only_space();
+ ReadOnlyPage* page = read_only_space->pages()[chunk_index];
+ Address address = page->OffsetToAddress(chunk_offset);
+ HeapObject heap_object = HeapObject::FromAddress(address);
+
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
}
// Find an object in the roots array and write a pointer to it to the
@@ -598,41 +821,39 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
case kRootArray: {
int id = source_.GetInt();
RootIndex root_index = static_cast<RootIndex>(id);
- HeapObject heap_object = HeapObject::cast(isolate()->root(root_index));
- DCHECK(!Heap::InYoungGeneration(heap_object));
+ Handle<HeapObject> heap_object =
+ Handle<HeapObject>::cast(isolate()->root_handle(root_index));
hot_objects_.Add(heap_object);
- return Write(current, HeapObjectReference::From(
- heap_object, GetAndResetNextReferenceType()));
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
}
// Find an object in the startup object cache and write a pointer to it to
// the current object.
case kStartupObjectCache: {
int cache_index = source_.GetInt();
+ // TODO(leszeks): Could we use the address of the startup_object_cache
+ // entry as a Handle backing?
HeapObject heap_object =
HeapObject::cast(isolate()->startup_object_cache()->at(cache_index));
- DCHECK(!Heap::InYoungGeneration(heap_object));
- return Write(current, HeapObjectReference::From(
- heap_object, GetAndResetNextReferenceType()));
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
}
// Find an object in the read-only object cache and write a pointer to it
// to the current object.
case kReadOnlyObjectCache: {
int cache_index = source_.GetInt();
+ // TODO(leszeks): Could we use the address of the cached_read_only_object
+ // entry as a Handle backing?
HeapObject heap_object = HeapObject::cast(
isolate()->read_only_heap()->cached_read_only_object(cache_index));
- DCHECK(!Heap::InYoungGeneration(heap_object));
- return Write(current, HeapObjectReference::From(
- heap_object, GetAndResetNextReferenceType()));
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
}
// Deserialize a new meta-map and write a pointer to it to the current
// object.
case kNewMetaMap: {
- HeapObject heap_object = ReadMetaMap();
- DCHECK(!Heap::InYoungGeneration(heap_object));
- return Write(current, HeapObjectReference::Strong(heap_object));
+ Handle<HeapObject> heap_object = ReadMetaMap();
+ return slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG);
}
// Find an external reference and write a pointer to it to the current
@@ -641,10 +862,11 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
case kExternalReference: {
Address address = ReadExternalReferenceCase();
if (V8_HEAP_SANDBOX_BOOL && data == kSandboxedExternalReference) {
- return WriteExternalPointer(current, address);
+ return WriteExternalPointer(slot_accessor.slot(), address,
+ kForeignForeignAddressTag);
} else {
DCHECK(!V8_HEAP_SANDBOX_BOOL);
- return WriteAddress(current, address);
+ return WriteAddress(slot_accessor.slot(), address);
}
}
@@ -657,49 +879,36 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
// the current object.
case kAttachedReference: {
int index = source_.GetInt();
- HeapObjectReference ref = HeapObjectReference::From(
- *attached_objects_[index], GetAndResetNextReferenceType());
+ Handle<HeapObject> heap_object = attached_objects_[index];
// This is the only case where we might encounter new space objects, so
- // maybe emit a write barrier before returning the updated slot.
- TSlot ret = Write(current, ref);
- if (Heap::InYoungGeneration(ref)) {
- HeapObject current_object =
- HeapObject::FromAddress(current_object_address);
- GenerationalBarrier(current_object, MaybeObjectSlot(current.address()),
- ref);
- }
- return ret;
+ // maybe emit a generational write barrier.
+ return slot_accessor.WriteWithGenerationalBarrier(
+ heap_object, GetAndResetNextReferenceType());
}
case kNop:
- return current;
-
- // NextChunk should only be seen during object allocation.
- case kNextChunk:
- UNREACHABLE();
+ return 0;
case kRegisterPendingForwardRef: {
- DCHECK_NE(current_object_address, kNullAddress);
- HeapObject obj = HeapObject::FromAddress(current_object_address);
HeapObjectReferenceType ref_type = GetAndResetNextReferenceType();
- unresolved_forward_refs_.emplace_back(
- obj, current.address() - current_object_address, ref_type);
+ unresolved_forward_refs_.emplace_back(slot_accessor.object(),
+ slot_accessor.offset(), ref_type);
num_unresolved_forward_refs_++;
- return current + 1;
+ return 1;
}
case kResolvePendingForwardRef: {
// Pending forward refs can only be resolved after the heap object's map
// field is deserialized; currently they only appear immediately after
// the map field.
- DCHECK_EQ(current.address(), current_object_address + kTaggedSize);
- HeapObject obj = HeapObject::FromAddress(current_object_address);
+ DCHECK_EQ(slot_accessor.offset(), HeapObject::kHeaderSize);
+ Handle<HeapObject> obj = slot_accessor.object();
int index = source_.GetInt();
auto& forward_ref = unresolved_forward_refs_[index];
- TaggedField<MaybeObject>::store(
- forward_ref.object, forward_ref.offset,
- HeapObjectReference::From(obj, forward_ref.ref_type));
+ SlotAccessorForHeapObject::ForSlotOffset(forward_ref.object,
+ forward_ref.offset)
+ .Write(*obj, forward_ref.ref_type);
num_unresolved_forward_refs_--;
if (num_unresolved_forward_refs_ == 0) {
// If there's no more pending fields, clear the entire pending field
@@ -707,9 +916,9 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
unresolved_forward_refs_.clear();
} else {
// Otherwise, at least clear the pending field.
- forward_ref.object = HeapObject();
+ forward_ref.object = Handle<HeapObject>();
}
- return current;
+ return 0;
}
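[Editor's note] The two bytecodes above form a simple patch-later mechanism: a slot whose target has not yet been deserialized is registered, and a later resolve bytecode writes the now-existing object into the recorded slot. A minimal model of the idea (not V8's data structures):

#include <cassert>
#include <vector>

struct PendingRef {
  int* slot;  // stands in for (object handle, offset)
};

int main() {
  int field_needing_target = -1;
  std::vector<PendingRef> unresolved;

  // kRegisterPendingForwardRef: remember where the value must eventually go.
  unresolved.push_back({&field_needing_target});

  // ... the referenced object is deserialized later ...
  int target_object = 42;

  // kResolvePendingForwardRef: patch the recorded slot with the real target.
  *unresolved[0].slot = target_object;
  unresolved.clear();

  assert(field_needing_target == 42);
}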
case kSynchronize:
@@ -719,31 +928,74 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
// Deserialize raw data of variable length.
case kVariableRawData: {
- int size_in_bytes = source_.GetInt();
- DCHECK(IsAligned(size_in_bytes, kTaggedSize));
- source_.CopyRaw(current.ToVoidPtr(), size_in_bytes);
- return TSlot(current.address() + size_in_bytes);
+ // This operation is only supported for tagged-size slots, else we might
+ // become misaligned.
+ DCHECK_EQ(TSlot::kSlotDataSize, kTaggedSize);
+ int size_in_tagged = source_.GetInt();
+ // TODO(leszeks): Only copy slots when there are Smis in the serialized
+ // data.
+ source_.CopySlots(slot_accessor.slot().location(), size_in_tagged);
+ return size_in_tagged;
}
// Deserialize raw code directly into the body of the code object.
- case kVariableRawCode: {
- // VariableRawCode can only occur right after the heap object header.
- DCHECK_EQ(current.address(), current_object_address + kTaggedSize);
- int size_in_bytes = source_.GetInt();
- DCHECK(IsAligned(size_in_bytes, kTaggedSize));
- source_.CopyRaw(
- reinterpret_cast<void*>(current_object_address + Code::kDataStart),
- size_in_bytes);
- // Deserialize tagged fields in the code object header and reloc infos.
- ReadCodeObjectBody(current_object_address);
- // Set current to the code object end.
- return TSlot(current.address() + Code::kDataStart -
- HeapObject::kHeaderSize + size_in_bytes);
+ case kCodeBody: {
+ // This operation is only supported for tagged-size slots, else we might
+ // become misaligned.
+ DCHECK_EQ(TSlot::kSlotDataSize, kTaggedSize);
+ // CodeBody can only occur right after the heap object header.
+ DCHECK_EQ(slot_accessor.offset(), HeapObject::kHeaderSize);
+
+ int size_in_tagged = source_.GetInt();
+ int size_in_bytes = size_in_tagged * kTaggedSize;
+
+ {
+ DisallowGarbageCollection no_gc;
+ Code code = Code::cast(*slot_accessor.object());
+
+ // First deserialize the code itself.
+ source_.CopyRaw(
+ reinterpret_cast<void*>(code.address() + Code::kDataStart),
+ size_in_bytes);
+ }
+
+ // Then deserialize the code header
+ ReadData(slot_accessor.object(), HeapObject::kHeaderSize / kTaggedSize,
+ Code::kDataStart / kTaggedSize);
+
+ // Then deserialize the pre-serialized RelocInfo objects.
+ std::vector<Handle<HeapObject>> preserialized_objects;
+ while (source_.Peek() != kSynchronize) {
+ Handle<HeapObject> obj = ReadObject();
+ preserialized_objects.push_back(obj);
+ }
+ // Skip the synchronize bytecode.
+ source_.Advance(1);
+
+ // Finally iterate RelocInfos (the same way it was done by the serializer)
+ // and deserialize respective data into RelocInfos. The RelocIterator
+ // holds a raw pointer to the code, so we have to disable garbage
+ // collection here. It's ok though, any objects it would have needed are
+ // in the preserialized_objects vector.
+ {
+ DisallowGarbageCollection no_gc;
+
+ Code code = Code::cast(*slot_accessor.object());
+ RelocInfoVisitor visitor(this, &preserialized_objects);
+ for (RelocIterator it(code, Code::BodyDescriptor::kRelocModeMask);
+ !it.done(); it.next()) {
+ it.rinfo()->Visit(&visitor);
+ }
+ }
+
+ // Advance to the end of the code object.
+ return (Code::kDataStart - HeapObject::kHeaderSize) / kTaggedSize +
+ size_in_tagged;
}
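[Editor's note] The kCodeBody case reads every object referenced from relocation entries up front (until the synchronize bytecode) and then replays them in order while iterating the RelocInfos under DisallowGarbageCollection. The replay side of that pattern looks roughly like this (illustrative, not V8's RelocIterator API):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Objects were pre-read into a vector; the visitor hands them out in the
// exact order the serializer recorded them, so no allocation is needed while
// the raw-pointer-based relocation iteration is running.
struct RelocVisitor {
  const std::vector<std::string>* objects;
  size_t next;
  const std::string& NextObject() { return (*objects)[next++]; }
};

int main() {
  std::vector<std::string> preserialized = {"code_target", "embedded_object"};
  RelocVisitor visitor{&preserialized, 0};

  assert(visitor.NextObject() == "code_target");      // first reloc entry
  assert(visitor.NextObject() == "embedded_object");  // second reloc entry
  assert(visitor.next == preserialized.size());       // everything consumed
}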
case kVariableRepeat: {
int repeats = VariableRepeatCount::Decode(source_.GetInt());
- return ReadRepeatedObject(current, repeats);
+ return ReadRepeatedObject(slot_accessor, repeats);
}
case kOffHeapBackingStore: {
@@ -755,7 +1007,7 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
CHECK_NOT_NULL(backing_store);
source_.CopyRaw(backing_store->buffer_start(), byte_length);
backing_stores_.push_back(std::move(backing_store));
- return current;
+ return 0;
}
case kSandboxedApiReference:
@@ -771,29 +1023,24 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
address = reinterpret_cast<Address>(NoExternalReferencesCallback);
}
if (V8_HEAP_SANDBOX_BOOL && data == kSandboxedApiReference) {
- return WriteExternalPointer(current, address);
+ return WriteExternalPointer(slot_accessor.slot(), address,
+ kForeignForeignAddressTag);
} else {
DCHECK(!V8_HEAP_SANDBOX_BOOL);
- return WriteAddress(current, address);
+ return WriteAddress(slot_accessor.slot(), address);
}
}
case kClearedWeakReference:
- return Write(current, HeapObjectReference::ClearedValue(isolate()));
+ return slot_accessor.Write(HeapObjectReference::ClearedValue(isolate()));
case kWeakPrefix: {
// We shouldn't have two weak prefixes in a row.
DCHECK(!next_reference_is_weak_);
// We shouldn't have weak refs without a current object.
- DCHECK_NE(current_object_address, kNullAddress);
+ DCHECK_NE(slot_accessor.object()->address(), kNullAddress);
next_reference_is_weak_ = true;
- return current;
- }
-
- case CASE_RANGE(kAlignmentPrefix, 3): {
- int alignment = data - (SerializerDeserializer::kAlignmentPrefix - 1);
- allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
- return current;
+ return 0;
}
case CASE_RANGE(kRootArrayConstants, 32): {
@@ -805,33 +1052,36 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
static_cast<int>(RootIndex::kLastImmortalImmovableRoot));
RootIndex root_index = RootArrayConstant::Decode(data);
- MaybeObject object = MaybeObject(ReadOnlyRoots(isolate()).at(root_index));
- DCHECK(!Heap::InYoungGeneration(object));
- return Write(current, object);
+ Handle<HeapObject> heap_object =
+ Handle<HeapObject>::cast(isolate()->root_handle(root_index));
+ return slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG);
}
case CASE_RANGE(kHotObject, 8): {
int index = HotObject::Decode(data);
- HeapObject hot_object = hot_objects_.Get(index);
- DCHECK(!Heap::InYoungGeneration(hot_object));
- return Write(current, HeapObjectReference::From(
- hot_object, GetAndResetNextReferenceType()));
+ Handle<HeapObject> hot_object = hot_objects_.Get(index);
+ return slot_accessor.Write(hot_object, GetAndResetNextReferenceType());
}
case CASE_RANGE(kFixedRawData, 32): {
// Deserialize raw data of fixed length from 1 to 32 times kTaggedSize.
int size_in_tagged = FixedRawDataWithSize::Decode(data);
- source_.CopyRaw(current.ToVoidPtr(), size_in_tagged * kTaggedSize);
-
- int size_in_bytes = size_in_tagged * kTaggedSize;
- int size_in_slots = size_in_bytes / TSlot::kSlotDataSize;
- DCHECK(IsAligned(size_in_bytes, TSlot::kSlotDataSize));
- return current + size_in_slots;
+ STATIC_ASSERT(TSlot::kSlotDataSize == kTaggedSize ||
+ TSlot::kSlotDataSize == 2 * kTaggedSize);
+ int size_in_slots = size_in_tagged / (TSlot::kSlotDataSize / kTaggedSize);
+ // kFixedRawData can have kTaggedSize != TSlot::kSlotDataSize when
+ // serializing Smi roots in pointer-compressed builds. In this case, the
+ // size in bytes is unconditionally the (full) slot size.
+ DCHECK_IMPLIES(kTaggedSize != TSlot::kSlotDataSize, size_in_slots == 1);
+ // TODO(leszeks): Only copy slots when there are Smis in the serialized
+ // data.
+ source_.CopySlots(slot_accessor.slot().location(), size_in_slots);
+ return size_in_slots;
}
case CASE_RANGE(kFixedRepeat, 16): {
int repeats = FixedRepeatWithCount::Decode(data);
- return ReadRepeatedObject(current, repeats);
+ return ReadRepeatedObject(slot_accessor, repeats);
}
#ifdef DEBUG
@@ -864,5 +1114,42 @@ Address Deserializer::ReadExternalReferenceCase() {
return isolate()->external_reference_table()->address(reference_id);
}
+namespace {
+AllocationType SpaceToType(SnapshotSpace space) {
+ switch (space) {
+ case SnapshotSpace::kCode:
+ return AllocationType::kCode;
+ case SnapshotSpace::kMap:
+ return AllocationType::kMap;
+ case SnapshotSpace::kOld:
+ return AllocationType::kOld;
+ case SnapshotSpace::kReadOnlyHeap:
+ return AllocationType::kReadOnly;
+ }
+}
+} // namespace
+
+HeapObject Deserializer::Allocate(SnapshotSpace space, int size,
+ AllocationAlignment alignment) {
+#ifdef DEBUG
+ if (!previous_allocation_obj_.is_null()) {
+ // Make sure that the previous object is initialized sufficiently to
+ // be iterated over by the GC.
+ int object_size = previous_allocation_obj_->Size();
+ DCHECK_LE(object_size, previous_allocation_size_);
+ }
+#endif
+
+ HeapObject obj = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
+ size, SpaceToType(space), AllocationOrigin::kRuntime, alignment);
+
+#ifdef DEBUG
+ previous_allocation_obj_ = handle(obj, isolate());
+ previous_allocation_size_ = size;
+#endif
+
+ return obj;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index ae410bacd3..62f7ea39e0 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -17,7 +17,6 @@
#include "src/objects/map.h"
#include "src/objects/string-table.h"
#include "src/objects/string.h"
-#include "src/snapshot/deserializer-allocator.h"
#include "src/snapshot/serializer-deserializer.h"
#include "src/snapshot/snapshot-source-sink.h"
@@ -40,27 +39,31 @@ class Object;
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
public:
+ // Smi value for filling in not-yet initialized tagged field values with a
+ // valid tagged pointer. A field value equal to this doesn't necessarily
+ // indicate that a field is uninitialized, but an uninitialized field should
+ // definitely equal this value.
+ //
+ // This _has_ to be kNullAddress, so that an uninitialized_field_value read as
+ // an embedded pointer field is interpreted as nullptr. This is so that
+ // uninitialized embedded pointers are not forwarded to the embedder as part
+ // of embedder tracing (and similar mechanisms), as nullptrs are skipped for
+ // those cases and otherwise the embedder would try to dereference the
+ // uninitialized pointer value.
+ static constexpr Smi uninitialized_field_value() { return Smi(kNullAddress); }
+
~Deserializer() override;
+ Deserializer(const Deserializer&) = delete;
+ Deserializer& operator=(const Deserializer&) = delete;
- void SetRehashability(bool v) { can_rehash_ = v; }
uint32_t GetChecksum() const { return source_.GetChecksum(); }
protected:
// Create a deserializer from a snapshot byte source.
- template <class Data>
- Deserializer(Data* data, bool deserializing_user_code)
- : isolate_(nullptr),
- source_(data->Payload()),
- magic_number_(data->GetMagicNumber()),
- deserializing_user_code_(deserializing_user_code),
- can_rehash_(false) {
- allocator()->DecodeReservation(data->Reservations());
- // We start the indices here at 1, so that we can distinguish between an
- // actual index and a nullptr in a deserialized object requiring fix-up.
- backing_stores_.push_back({});
- }
+ Deserializer(Isolate* isolate, Vector<const byte> payload,
+ uint32_t magic_number, bool deserializing_user_code,
+ bool can_rehash);
- void Initialize(Isolate* isolate);
void DeserializeDeferredObjects();
// Create Log events for newly deserialized objects.
@@ -68,9 +71,14 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
void LogScriptEvents(Script script);
void LogNewMapEvents();
+ // Descriptor arrays are deserialized as "strong", so that there is no risk of
+ // them getting trimmed during a partial deserialization. This method makes
+ // them "weak" again after deserialization completes.
+ void WeakenDescriptorArrays();
+
// This returns the address of an object that has been described in the
- // snapshot by chunk index and offset.
- HeapObject GetBackReferencedObject(SnapshotSpace space);
+ // snapshot by object vector index.
+ Handle<HeapObject> GetBackReferencedObject();
// Add an object to back an attached reference. The order to add objects must
// mirror the order they are added in the serializer.
@@ -85,17 +93,17 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
Isolate* isolate() const { return isolate_; }
SnapshotByteSource* source() { return &source_; }
- const std::vector<AllocationSite>& new_allocation_sites() const {
+ const std::vector<Handle<AllocationSite>>& new_allocation_sites() const {
return new_allocation_sites_;
}
- const std::vector<Code>& new_code_objects() const {
+ const std::vector<Handle<Code>>& new_code_objects() const {
return new_code_objects_;
}
- const std::vector<Map>& new_maps() const { return new_maps_; }
- const std::vector<AccessorInfo>& accessor_infos() const {
+ const std::vector<Handle<Map>>& new_maps() const { return new_maps_; }
+ const std::vector<Handle<AccessorInfo>>& accessor_infos() const {
return accessor_infos_;
}
- const std::vector<CallHandlerInfo>& call_handler_infos() const {
+ const std::vector<Handle<CallHandlerInfo>>& call_handler_infos() const {
return call_handler_infos_;
}
const std::vector<Handle<Script>>& new_scripts() const {
@@ -106,76 +114,97 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
return new_off_heap_array_buffers_;
}
+ const std::vector<Handle<DescriptorArray>>& new_descriptor_arrays() const {
+ return new_descriptor_arrays_;
+ }
+
std::shared_ptr<BackingStore> backing_store(size_t i) {
DCHECK_LT(i, backing_stores_.size());
return backing_stores_[i];
}
- DeserializerAllocator* allocator() { return &allocator_; }
bool deserializing_user_code() const { return deserializing_user_code_; }
bool can_rehash() const { return can_rehash_; }
void Rehash();
+ Handle<HeapObject> ReadObject();
+
private:
+ class RelocInfoVisitor;
+ // A circular queue of hot objects. This is added to in the same order as in
+ // Serializer::HotObjectsList, but this stores the objects as a vector of
+ // existing handles. This allows us to add Handles to the queue without having
+ // to create new handles. Note that this depends on those Handles staying
+ // valid as long as the HotObjectsList is alive.
+ class HotObjectsList {
+ public:
+ HotObjectsList() = default;
+ HotObjectsList(const HotObjectsList&) = delete;
+ HotObjectsList& operator=(const HotObjectsList&) = delete;
+
+ void Add(Handle<HeapObject> object) {
+ circular_queue_[index_] = object;
+ index_ = (index_ + 1) & kSizeMask;
+ }
+
+ Handle<HeapObject> Get(int index) {
+ DCHECK(!circular_queue_[index].is_null());
+ return circular_queue_[index];
+ }
+
+ private:
+ static const int kSize = kHotObjectCount;
+ static const int kSizeMask = kSize - 1;
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kSize));
+ Handle<HeapObject> circular_queue_[kSize];
+ int index_ = 0;
+ };
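[Editor's note] The hot-objects queue above relies on kSize being a power of two so that wrap-around is a single bit-mask operation. A self-contained sketch of that ring-buffer indexing (generic, not tied to V8's HotObjectsList):

#include <array>
#include <cassert>

template <typename T, int kSize>
class RingQueue {
  static_assert((kSize & (kSize - 1)) == 0, "kSize must be a power of two");

 public:
  void Add(T value) {
    queue_[index_] = value;
    index_ = (index_ + 1) & (kSize - 1);  // cheap wrap-around via the mask
  }
  T Get(int index) const { return queue_[index]; }

 private:
  std::array<T, kSize> queue_{};
  int index_ = 0;
};

int main() {
  RingQueue<int, 8> hot;
  for (int i = 0; i < 10; ++i) hot.Add(i);  // wraps after the 8th add
  assert(hot.Get(0) == 8);  // overwritten on the second pass
  assert(hot.Get(1) == 9);
  assert(hot.Get(2) == 2);  // still holds the first-pass value
}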
+
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override;
void Synchronize(VisitorSynchronization::SyncTag tag) override;
template <typename TSlot>
- inline TSlot Write(TSlot dest, MaybeObject value);
-
- template <typename TSlot>
- inline TSlot Write(TSlot dest, HeapObject value,
- HeapObjectReferenceType type);
+ inline int WriteAddress(TSlot dest, Address value);
template <typename TSlot>
- inline TSlot WriteAddress(TSlot dest, Address value);
+ inline int WriteExternalPointer(TSlot dest, Address value,
+ ExternalPointerTag tag);
- template <typename TSlot>
- inline TSlot WriteExternalPointer(TSlot dest, Address value);
+ // Fills in a heap object's data from start to end (exclusive). Start and end
+ // are slot indices within the object.
+ void ReadData(Handle<HeapObject> object, int start_slot_index,
+ int end_slot_index);
- // Fills in some heap data in an area from start to end (non-inclusive). The
- // object_address is the address of the object we are writing into, or nullptr
- // if we are not writing into an object, i.e. if we are writing a series of
- // tagged values that are not on the heap.
- template <typename TSlot>
- void ReadData(TSlot start, TSlot end, Address object_address);
+ // Fills in a contiguous range of full object slots (e.g. root pointers) from
+ // start to end (exclusive).
+ void ReadData(FullMaybeObjectSlot start, FullMaybeObjectSlot end);
// Helper for ReadData which reads the given bytecode and fills in some heap
// data into the given slot. May fill in zero or multiple slots, so it returns
- // the next unfilled slot.
- template <typename TSlot>
- TSlot ReadSingleBytecodeData(byte data, TSlot current,
- Address object_address);
+ // the number of slots filled.
+ template <typename SlotAccessor>
+ int ReadSingleBytecodeData(byte data, SlotAccessor slot_accessor);
// A helper function for ReadData for reading external references.
inline Address ReadExternalReferenceCase();
- HeapObject ReadObject(SnapshotSpace space_number);
- HeapObject ReadMetaMap();
- void ReadCodeObjectBody(Address code_object_address);
+ Handle<HeapObject> ReadObject(SnapshotSpace space_number);
+ Handle<HeapObject> ReadMetaMap();
HeapObjectReferenceType GetAndResetNextReferenceType();
- protected:
- HeapObject ReadObject();
-
- public:
- void VisitCodeTarget(Code host, RelocInfo* rinfo);
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo);
- void VisitRuntimeEntry(Code host, RelocInfo* rinfo);
- void VisitExternalReference(Code host, RelocInfo* rinfo);
- void VisitInternalReference(Code host, RelocInfo* rinfo);
- void VisitOffHeapTarget(Code host, RelocInfo* rinfo);
-
- private:
- template <typename TSlot>
- TSlot ReadRepeatedObject(TSlot current, int repeat_count);
+ template <typename SlotGetter>
+ int ReadRepeatedObject(SlotGetter slot_getter, int repeat_count);
// Special handling for serialized code like hooking up internalized strings.
- HeapObject PostProcessNewObject(HeapObject obj, SnapshotSpace space);
+ void PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
+ SnapshotSpace space);
+
+ HeapObject Allocate(SnapshotSpace space, int size,
+ AllocationAlignment alignment);
// Cached current isolate.
Isolate* isolate_;
@@ -186,15 +215,20 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
SnapshotByteSource source_;
uint32_t magic_number_;
- std::vector<Map> new_maps_;
- std::vector<AllocationSite> new_allocation_sites_;
- std::vector<Code> new_code_objects_;
- std::vector<AccessorInfo> accessor_infos_;
- std::vector<CallHandlerInfo> call_handler_infos_;
+ HotObjectsList hot_objects_;
+ std::vector<Handle<Map>> new_maps_;
+ std::vector<Handle<AllocationSite>> new_allocation_sites_;
+ std::vector<Handle<Code>> new_code_objects_;
+ std::vector<Handle<AccessorInfo>> accessor_infos_;
+ std::vector<Handle<CallHandlerInfo>> call_handler_infos_;
std::vector<Handle<Script>> new_scripts_;
std::vector<Handle<JSArrayBuffer>> new_off_heap_array_buffers_;
+ std::vector<Handle<DescriptorArray>> new_descriptor_arrays_;
std::vector<std::shared_ptr<BackingStore>> backing_stores_;
+ // Vector of allocated objects that can be accessed by a backref, by index.
+ std::vector<Handle<HeapObject>> back_refs_;
+
// Unresolved forward references (registered with kRegisterPendingForwardRef)
// are collected in order as (object, field offset) pairs. The subsequent
// forward ref resolution (with kResolvePendingForwardRef) accesses this
@@ -202,34 +236,32 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
//
// The vector is cleared when there are no more unresolved forward refs.
struct UnresolvedForwardRef {
- UnresolvedForwardRef(HeapObject object, int offset,
+ UnresolvedForwardRef(Handle<HeapObject> object, int offset,
HeapObjectReferenceType ref_type)
: object(object), offset(offset), ref_type(ref_type) {}
- HeapObject object;
+ Handle<HeapObject> object;
int offset;
HeapObjectReferenceType ref_type;
};
std::vector<UnresolvedForwardRef> unresolved_forward_refs_;
int num_unresolved_forward_refs_ = 0;
- DeserializerAllocator allocator_;
const bool deserializing_user_code_;
bool next_reference_is_weak_ = false;
// TODO(6593): generalize rehashing, and remove this flag.
bool can_rehash_;
- std::vector<HeapObject> to_rehash_;
+ std::vector<Handle<HeapObject>> to_rehash_;
#ifdef DEBUG
uint32_t num_api_references_;
-#endif // DEBUG
-
- // For source(), isolate(), and allocator().
- friend class DeserializerAllocator;
- DISALLOW_COPY_AND_ASSIGN(Deserializer);
+ // Record the previous object allocated for DCHECKs.
+ Handle<HeapObject> previous_allocation_obj_;
+ int previous_allocation_size_ = 0;
+#endif // DEBUG
};
// Used to insert a deserialized internalized string into the string table.
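To make the indexing above concrete, here is a minimal standalone sketch (not part of the patch) of the power-of-two circular queue that HotObjectsList implements; the element type and size are illustrative stand-ins for Handle<HeapObject> and kHotObjectCount.

#include <array>
#include <cassert>

// Sketch only: fixed-size ring buffer indexed with a power-of-two mask,
// mirroring HotObjectsList::Add / Get in the deserializer above.
template <typename T, int kSize = 8>
class RingQueue {
  static_assert((kSize & (kSize - 1)) == 0, "kSize must be a power of two");

 public:
  void Add(const T& value) {
    queue_[index_] = value;
    index_ = (index_ + 1) & kSizeMask;  // Wrap around without a modulo.
  }

  const T& Get(int index) const {
    assert(0 <= index && index < kSize);
    return queue_[index];
  }

 private:
  static constexpr int kSizeMask = kSize - 1;
  std::array<T, kSize> queue_{};
  int index_ = 0;
};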
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.cc b/deps/v8/src/snapshot/embedded/embedded-data.cc
index 3496a613f2..03702bf331 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-data.cc
@@ -50,9 +50,11 @@ Code InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
}
// static
-void InstructionStream::CreateOffHeapInstructionStream(
- Isolate* isolate, uint8_t** code, uint32_t* code_size, uint8_t** metadata,
- uint32_t* metadata_size) {
+void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
+ uint8_t** code,
+ uint32_t* code_size,
+ uint8_t** data,
+ uint32_t* data_size) {
// Create the embedded blob from scratch using the current Isolate's heap.
EmbeddedData d = EmbeddedData::FromIsolate(isolate);
@@ -71,14 +73,13 @@ void InstructionStream::CreateOffHeapInstructionStream(
alignment, PageAllocator::kReadWrite));
CHECK_NOT_NULL(allocated_code_bytes);
- void* const requested_allocation_metadata_address =
+ void* const requested_allocation_data_address =
AlignedAddress(isolate->heap()->GetRandomMmapAddr(), alignment);
- const uint32_t allocation_metadata_size =
- RoundUp(d.metadata_size(), alignment);
- uint8_t* allocated_metadata_bytes = static_cast<uint8_t*>(AllocatePages(
- page_allocator, requested_allocation_metadata_address,
- allocation_metadata_size, alignment, PageAllocator::kReadWrite));
- CHECK_NOT_NULL(allocated_metadata_bytes);
+ const uint32_t allocation_data_size = RoundUp(d.data_size(), alignment);
+ uint8_t* allocated_data_bytes = static_cast<uint8_t*>(AllocatePages(
+ page_allocator, requested_allocation_data_address, allocation_data_size,
+ alignment, PageAllocator::kReadWrite));
+ CHECK_NOT_NULL(allocated_data_bytes);
// Copy the embedded blob into the newly allocated backing store. Switch
// permissions to read-execute since builtin code is immutable from now on
@@ -92,14 +93,14 @@ void InstructionStream::CreateOffHeapInstructionStream(
CHECK(SetPermissions(page_allocator, allocated_code_bytes,
allocation_code_size, PageAllocator::kReadExecute));
- std::memcpy(allocated_metadata_bytes, d.metadata(), d.metadata_size());
- CHECK(SetPermissions(page_allocator, allocated_metadata_bytes,
- allocation_metadata_size, PageAllocator::kRead));
+ std::memcpy(allocated_data_bytes, d.data(), d.data_size());
+ CHECK(SetPermissions(page_allocator, allocated_data_bytes,
+ allocation_data_size, PageAllocator::kRead));
*code = allocated_code_bytes;
*code_size = d.code_size();
- *metadata = allocated_metadata_bytes;
- *metadata_size = d.metadata_size();
+ *data = allocated_data_bytes;
+ *data_size = d.data_size();
d.Dispose();
}
@@ -107,13 +108,13 @@ void InstructionStream::CreateOffHeapInstructionStream(
// static
void InstructionStream::FreeOffHeapInstructionStream(uint8_t* code,
uint32_t code_size,
- uint8_t* metadata,
- uint32_t metadata_size) {
+ uint8_t* data,
+ uint32_t data_size) {
v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
const uint32_t page_size =
static_cast<uint32_t>(page_allocator->AllocatePageSize());
CHECK(FreePages(page_allocator, code, RoundUp(code_size, page_size)));
- CHECK(FreePages(page_allocator, metadata, RoundUp(metadata_size, page_size)));
+ CHECK(FreePages(page_allocator, data, RoundUp(data_size, page_size)));
}
namespace {
@@ -157,9 +158,8 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (int i = 0; i < Builtins::builtin_count; i++) {
- if (!Builtins::IsIsolateIndependent(i)) continue;
-
Code code = isolate->builtins()->builtin(i);
RelocIterator on_heap_it(code, kRelocMask);
RelocIterator off_heap_it(blob, code, kRelocMask);
@@ -205,37 +205,40 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
Builtins* builtins = isolate->builtins();
// Store instruction stream lengths and offsets.
- std::vector<struct Metadata> metadata(kTableSize);
+ std::vector<struct LayoutDescription> layout_descriptions(kTableSize);
bool saw_unsafe_builtin = false;
uint32_t raw_code_size = 0;
+ uint32_t raw_data_size = 0;
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (int i = 0; i < Builtins::builtin_count; i++) {
Code code = builtins->builtin(i);
- if (Builtins::IsIsolateIndependent(i)) {
- // Sanity-check that the given builtin is isolate-independent and does not
- // use the trampoline register in its calling convention.
- if (!code.IsIsolateIndependent(isolate)) {
- saw_unsafe_builtin = true;
- fprintf(stderr, "%s is not isolate-independent.\n", Builtins::name(i));
- }
- if (BuiltinAliasesOffHeapTrampolineRegister(isolate, code)) {
- saw_unsafe_builtin = true;
- fprintf(stderr, "%s aliases the off-heap trampoline register.\n",
- Builtins::name(i));
- }
-
- uint32_t length = static_cast<uint32_t>(code.raw_instruction_size());
-
- DCHECK_EQ(0, raw_code_size % kCodeAlignment);
- metadata[i].instructions_offset = raw_code_size;
- metadata[i].instructions_length = length;
-
- // Align the start of each instruction stream.
- raw_code_size += PadAndAlign(length);
- } else {
- metadata[i].instructions_offset = raw_code_size;
+ // Sanity-check that the given builtin is isolate-independent and does not
+ // use the trampoline register in its calling convention.
+ if (!code.IsIsolateIndependent(isolate)) {
+ saw_unsafe_builtin = true;
+ fprintf(stderr, "%s is not isolate-independent.\n", Builtins::name(i));
+ }
+ if (BuiltinAliasesOffHeapTrampolineRegister(isolate, code)) {
+ saw_unsafe_builtin = true;
+ fprintf(stderr, "%s aliases the off-heap trampoline register.\n",
+ Builtins::name(i));
}
+
+ uint32_t instruction_size =
+ static_cast<uint32_t>(code.raw_instruction_size());
+ uint32_t metadata_size = static_cast<uint32_t>(code.raw_metadata_size());
+
+ DCHECK_EQ(0, raw_code_size % kCodeAlignment);
+ layout_descriptions[i].instruction_offset = raw_code_size;
+ layout_descriptions[i].instruction_length = instruction_size;
+ layout_descriptions[i].metadata_offset = raw_data_size;
+ layout_descriptions[i].metadata_length = metadata_size;
+
+ // Align the start of each section.
+ raw_code_size += PadAndAlignCode(instruction_size);
+ raw_data_size += PadAndAlignData(metadata_size);
}
CHECK_WITH_MSG(
!saw_unsafe_builtin,
@@ -243,12 +246,15 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
"isolate-dependent code or aliases the off-heap trampoline register. "
"If in doubt, ask jgruber@");
+ // Allocate space for the code section, value-initialized to 0.
+ STATIC_ASSERT(RawCodeOffset() == 0);
const uint32_t blob_code_size = RawCodeOffset() + raw_code_size;
- uint8_t* const blob_code = new uint8_t[blob_code_size];
- uint8_t* const raw_code_start = blob_code + RawCodeOffset();
- const uint32_t blob_metadata_size =
- MetadataTableOffset() + MetadataTableSize();
- uint8_t* const blob_metadata = new uint8_t[blob_metadata_size];
+ uint8_t* const blob_code = new uint8_t[blob_code_size]();
+
+ // Allocate space for the data section, value-initialized to 0.
+ STATIC_ASSERT(IsAligned(FixedDataSize(), Code::kMetadataAlignment));
+ const uint32_t blob_data_size = FixedDataSize() + raw_data_size;
+ uint8_t* const blob_data = new uint8_t[blob_data_size]();
// Initially zap the entire blob, effectively padding the alignment area
// between two builtins with int3's (on x64/ia32).
@@ -258,19 +264,34 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
{
STATIC_ASSERT(IsolateHashSize() == kSizetSize);
const size_t hash = isolate->HashIsolateForEmbeddedBlob();
- std::memcpy(blob_metadata + IsolateHashOffset(), &hash, IsolateHashSize());
+ std::memcpy(blob_data + IsolateHashOffset(), &hash, IsolateHashSize());
}
- // Write the metadata tables.
- DCHECK_EQ(MetadataTableSize(), sizeof(metadata[0]) * metadata.size());
- std::memcpy(blob_metadata + MetadataTableOffset(), metadata.data(),
- MetadataTableSize());
+ // Write the layout_descriptions tables.
+ DCHECK_EQ(LayoutDescriptionTableSize(),
+ sizeof(layout_descriptions[0]) * layout_descriptions.size());
+ std::memcpy(blob_data + LayoutDescriptionTableOffset(),
+ layout_descriptions.data(), LayoutDescriptionTableSize());
- // Write the raw data section.
+ // .. and the variable-size data section.
+ uint8_t* const raw_metadata_start = blob_data + RawMetadataOffset();
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (int i = 0; i < Builtins::builtin_count; i++) {
- if (!Builtins::IsIsolateIndependent(i)) continue;
Code code = builtins->builtin(i);
- uint32_t offset = metadata[i].instructions_offset;
+ uint32_t offset = layout_descriptions[i].metadata_offset;
+ uint8_t* dst = raw_metadata_start + offset;
+ DCHECK_LE(RawMetadataOffset() + offset + code.raw_metadata_size(),
+ blob_data_size);
+ std::memcpy(dst, reinterpret_cast<uint8_t*>(code.raw_metadata_start()),
+ code.raw_metadata_size());
+ }
+
+ // .. and the variable-size code section.
+ uint8_t* const raw_code_start = blob_code + RawCodeOffset();
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ Code code = builtins->builtin(i);
+ uint32_t offset = layout_descriptions[i].instruction_offset;
uint8_t* dst = raw_code_start + offset;
DCHECK_LE(RawCodeOffset() + offset + code.raw_instruction_size(),
blob_code_size);
@@ -278,20 +299,27 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
code.raw_instruction_size());
}
- EmbeddedData d(blob_code, blob_code_size, blob_metadata, blob_metadata_size);
+ EmbeddedData d(blob_code, blob_code_size, blob_data, blob_data_size);
// Fix up call targets that point to other embedded builtins.
FinalizeEmbeddedCodeTargets(isolate, &d);
// Hash the blob and store the result.
{
- STATIC_ASSERT(EmbeddedBlobHashSize() == kSizetSize);
- const size_t hash = d.CreateEmbeddedBlobHash();
- std::memcpy(blob_metadata + EmbeddedBlobHashOffset(), &hash,
- EmbeddedBlobHashSize());
-
- DCHECK_EQ(hash, d.CreateEmbeddedBlobHash());
- DCHECK_EQ(hash, d.EmbeddedBlobHash());
+ STATIC_ASSERT(EmbeddedBlobDataHashSize() == kSizetSize);
+ const size_t data_hash = d.CreateEmbeddedBlobDataHash();
+ std::memcpy(blob_data + EmbeddedBlobDataHashOffset(), &data_hash,
+ EmbeddedBlobDataHashSize());
+
+ STATIC_ASSERT(EmbeddedBlobCodeHashSize() == kSizetSize);
+ const size_t code_hash = d.CreateEmbeddedBlobCodeHash();
+ std::memcpy(blob_data + EmbeddedBlobCodeHashOffset(), &code_hash,
+ EmbeddedBlobCodeHashSize());
+
+ DCHECK_EQ(data_hash, d.CreateEmbeddedBlobDataHash());
+ DCHECK_EQ(data_hash, d.EmbeddedBlobDataHash());
+ DCHECK_EQ(code_hash, d.CreateEmbeddedBlobCodeHash());
+ DCHECK_EQ(code_hash, d.EmbeddedBlobCodeHash());
}
if (FLAG_serialization_statistics) d.PrintStatistics();
@@ -301,18 +329,30 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
Address EmbeddedData::InstructionStartOfBuiltin(int i) const {
DCHECK(Builtins::IsBuiltinId(i));
- const struct Metadata* metadata = Metadata();
- const uint8_t* result = RawCode() + metadata[i].instructions_offset;
- DCHECK_LE(result, code_ + code_size_);
- DCHECK_IMPLIES(result == code_ + code_size_,
- InstructionSizeOfBuiltin(i) == 0);
+ const struct LayoutDescription* descs = LayoutDescription();
+ const uint8_t* result = RawCode() + descs[i].instruction_offset;
+ DCHECK_LT(result, code_ + code_size_);
return reinterpret_cast<Address>(result);
}
uint32_t EmbeddedData::InstructionSizeOfBuiltin(int i) const {
DCHECK(Builtins::IsBuiltinId(i));
- const struct Metadata* metadata = Metadata();
- return metadata[i].instructions_length;
+ const struct LayoutDescription* descs = LayoutDescription();
+ return descs[i].instruction_length;
+}
+
+Address EmbeddedData::MetadataStartOfBuiltin(int i) const {
+ DCHECK(Builtins::IsBuiltinId(i));
+ const struct LayoutDescription* descs = LayoutDescription();
+ const uint8_t* result = RawMetadata() + descs[i].metadata_offset;
+ DCHECK_LE(descs[i].metadata_offset, data_size_);
+ return reinterpret_cast<Address>(result);
+}
+
+uint32_t EmbeddedData::MetadataSizeOfBuiltin(int i) const {
+ DCHECK(Builtins::IsBuiltinId(i));
+ const struct LayoutDescription* descs = LayoutDescription();
+ return descs[i].metadata_length;
}
Address EmbeddedData::InstructionStartOfBytecodeHandlers() const {
@@ -328,49 +368,49 @@ Address EmbeddedData::InstructionEndOfBytecodeHandlers() const {
InstructionSizeOfBuiltin(lastBytecodeHandler);
}
-size_t EmbeddedData::CreateEmbeddedBlobHash() const {
- STATIC_ASSERT(EmbeddedBlobHashOffset() == 0);
- STATIC_ASSERT(EmbeddedBlobHashSize() == kSizetSize);
- // Hash the entire blob except the hash field itself.
- Vector<const byte> payload1(metadata_ + EmbeddedBlobHashSize(),
- metadata_size_ - EmbeddedBlobHashSize());
- Vector<const byte> payload2(code_, code_size_);
- return Checksum(payload1, payload2);
+size_t EmbeddedData::CreateEmbeddedBlobDataHash() const {
+ STATIC_ASSERT(EmbeddedBlobDataHashOffset() == 0);
+ STATIC_ASSERT(EmbeddedBlobCodeHashOffset() == EmbeddedBlobDataHashSize());
+ STATIC_ASSERT(IsolateHashOffset() ==
+ EmbeddedBlobCodeHashOffset() + EmbeddedBlobCodeHashSize());
+ static constexpr uint32_t kFirstHashedDataOffset = IsolateHashOffset();
+ // Hash the entire data section except the embedded blob hash fields
+ // themselves.
+ Vector<const byte> payload(data_ + kFirstHashedDataOffset,
+ data_size_ - kFirstHashedDataOffset);
+ return Checksum(payload);
+}
+
+size_t EmbeddedData::CreateEmbeddedBlobCodeHash() const {
+ CHECK(FLAG_text_is_readable);
+ Vector<const byte> payload(code_, code_size_);
+ return Checksum(payload);
}
void EmbeddedData::PrintStatistics() const {
DCHECK(FLAG_serialization_statistics);
constexpr int kCount = Builtins::builtin_count;
-
- int embedded_count = 0;
- int instruction_size = 0;
int sizes[kCount];
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (int i = 0; i < kCount; i++) {
- if (!Builtins::IsIsolateIndependent(i)) continue;
- const int size = InstructionSizeOfBuiltin(i);
- instruction_size += size;
- sizes[embedded_count] = size;
- embedded_count++;
+ sizes[i] = InstructionSizeOfBuiltin(i);
}
// Sort for percentiles.
- std::sort(&sizes[0], &sizes[embedded_count]);
+ std::sort(&sizes[0], &sizes[kCount]);
- const int k50th = embedded_count * 0.5;
- const int k75th = embedded_count * 0.75;
- const int k90th = embedded_count * 0.90;
- const int k99th = embedded_count * 0.99;
+ const int k50th = kCount * 0.5;
+ const int k75th = kCount * 0.75;
+ const int k90th = kCount * 0.90;
+ const int k99th = kCount * 0.99;
PrintF("EmbeddedData:\n");
PrintF(" Total size: %d\n",
- static_cast<int>(code_size() + metadata_size()));
- PrintF(" Metadata size: %d\n",
- static_cast<int>(metadata_size()));
- PrintF(" Instruction size: %d\n", instruction_size);
- PrintF(" Padding: %d\n",
- static_cast<int>(code_size() - instruction_size));
- PrintF(" Embedded builtin count: %d\n", embedded_count);
+ static_cast<int>(code_size() + data_size()));
+ PrintF(" Data size: %d\n",
+ static_cast<int>(data_size()));
+ PrintF(" Code size: %d\n", static_cast<int>(code_size()));
PrintF(" Instruction size (50th percentile): %d\n", sizes[k50th]);
PrintF(" Instruction size (75th percentile): %d\n", sizes[k75th]);
PrintF(" Instruction size (90th percentile): %d\n", sizes[k90th]);
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.h b/deps/v8/src/snapshot/embedded/embedded-data.h
index d568be83f6..d8d2dd822d 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.h
+++ b/deps/v8/src/snapshot/embedded/embedded-data.h
@@ -32,11 +32,10 @@ class InstructionStream final : public AllStatic {
// mksnapshot. Otherwise, off-heap code is embedded directly into the binary.
static void CreateOffHeapInstructionStream(Isolate* isolate, uint8_t** code,
uint32_t* code_size,
- uint8_t** metadata,
- uint32_t* metadata_size);
+ uint8_t** data,
+ uint32_t* data_size);
static void FreeOffHeapInstructionStream(uint8_t* code, uint32_t code_size,
- uint8_t* metadata,
- uint32_t metadata_size);
+ uint8_t* data, uint32_t data_size);
};
class EmbeddedData final {
@@ -46,27 +45,26 @@ class EmbeddedData final {
static EmbeddedData FromBlob() {
return EmbeddedData(Isolate::CurrentEmbeddedBlobCode(),
Isolate::CurrentEmbeddedBlobCodeSize(),
- Isolate::CurrentEmbeddedBlobMetadata(),
- Isolate::CurrentEmbeddedBlobMetadataSize());
+ Isolate::CurrentEmbeddedBlobData(),
+ Isolate::CurrentEmbeddedBlobDataSize());
}
static EmbeddedData FromBlob(Isolate* isolate) {
- return EmbeddedData(isolate->embedded_blob_code(),
- isolate->embedded_blob_code_size(),
- isolate->embedded_blob_metadata(),
- isolate->embedded_blob_metadata_size());
+ return EmbeddedData(
+ isolate->embedded_blob_code(), isolate->embedded_blob_code_size(),
+ isolate->embedded_blob_data(), isolate->embedded_blob_data_size());
}
const uint8_t* code() const { return code_; }
uint32_t code_size() const { return code_size_; }
- const uint8_t* metadata() const { return metadata_; }
- uint32_t metadata_size() const { return metadata_size_; }
+ const uint8_t* data() const { return data_; }
+ uint32_t data_size() const { return data_size_; }
void Dispose() {
delete[] code_;
code_ = nullptr;
- delete[] metadata_;
- metadata_ = nullptr;
+ delete[] data_;
+ data_ = nullptr;
}
Address InstructionStartOfBuiltin(int i) const;
@@ -75,7 +73,8 @@ class EmbeddedData final {
Address InstructionStartOfBytecodeHandlers() const;
Address InstructionEndOfBytecodeHandlers() const;
- bool ContainsBuiltin(int i) const { return InstructionSizeOfBuiltin(i) > 0; }
+ Address MetadataStartOfBuiltin(int i) const;
+ uint32_t MetadataSizeOfBuiltin(int i) const;
uint32_t AddressForHashing(Address addr) {
Address start = reinterpret_cast<Address>(code_);
@@ -84,92 +83,132 @@ class EmbeddedData final {
}
// Padded with kCodeAlignment.
+ // TODO(v8:11045): Consider removing code alignment.
uint32_t PaddedInstructionSizeOfBuiltin(int i) const {
uint32_t size = InstructionSizeOfBuiltin(i);
- return (size == 0) ? 0 : PadAndAlign(size);
+ CHECK_NE(size, 0);
+ return PadAndAlignCode(size);
}
- size_t CreateEmbeddedBlobHash() const;
- size_t EmbeddedBlobHash() const {
- return *reinterpret_cast<const size_t*>(metadata_ +
- EmbeddedBlobHashOffset());
+ size_t CreateEmbeddedBlobDataHash() const;
+ size_t CreateEmbeddedBlobCodeHash() const;
+ size_t EmbeddedBlobDataHash() const {
+ return *reinterpret_cast<const size_t*>(data_ +
+ EmbeddedBlobDataHashOffset());
+ }
+ size_t EmbeddedBlobCodeHash() const {
+ return *reinterpret_cast<const size_t*>(data_ +
+ EmbeddedBlobCodeHashOffset());
}
size_t IsolateHash() const {
- return *reinterpret_cast<const size_t*>(metadata_ + IsolateHashOffset());
+ return *reinterpret_cast<const size_t*>(data_ + IsolateHashOffset());
}
- struct Metadata {
- // Blob layout information.
- uint32_t instructions_offset;
- uint32_t instructions_length;
+ // Blob layout information for a single instruction stream. Corresponds
+ // roughly to Code object layout (see the instruction and metadata area).
+ struct LayoutDescription {
+ // The offset and (unpadded) length of this builtin's instruction area
+ // from the start of the embedded code section.
+ uint32_t instruction_offset;
+ uint32_t instruction_length;
+ // The offset and (unpadded) length of this builtin's metadata area
+ // from the start of the embedded data section.
+ uint32_t metadata_offset;
+ uint32_t metadata_length;
};
- STATIC_ASSERT(offsetof(Metadata, instructions_offset) == 0);
- STATIC_ASSERT(offsetof(Metadata, instructions_length) == kUInt32Size);
- STATIC_ASSERT(sizeof(Metadata) == kUInt32Size + kUInt32Size);
+ STATIC_ASSERT(offsetof(LayoutDescription, instruction_offset) ==
+ 0 * kUInt32Size);
+ STATIC_ASSERT(offsetof(LayoutDescription, instruction_length) ==
+ 1 * kUInt32Size);
+ STATIC_ASSERT(offsetof(LayoutDescription, metadata_offset) ==
+ 2 * kUInt32Size);
+ STATIC_ASSERT(offsetof(LayoutDescription, metadata_length) ==
+ 3 * kUInt32Size);
+ STATIC_ASSERT(sizeof(LayoutDescription) == 4 * kUInt32Size);
// The layout of the blob is as follows:
//
- // metadata:
- // [0] hash of the remaining blob
- // [1] hash of embedded-blob-relevant heap objects
- // [2] metadata of instruction stream 0
- // ... metadata
+ // data:
+ // [0] hash of the data section
+ // [1] hash of the code section
+ // [2] hash of embedded-blob-relevant heap objects
+ // [3] layout description of instruction stream 0
+ // ... layout descriptions
+ // [x] metadata section of builtin 0
+ // ... metadata sections
//
// code:
- // [0] instruction streams 0
- // ... instruction streams
+ // [0] instruction section of builtin 0
+ // ... instruction sections
static constexpr uint32_t kTableSize = Builtins::builtin_count;
- static constexpr uint32_t EmbeddedBlobHashOffset() { return 0; }
- static constexpr uint32_t EmbeddedBlobHashSize() { return kSizetSize; }
+ static constexpr uint32_t EmbeddedBlobDataHashOffset() { return 0; }
+ static constexpr uint32_t EmbeddedBlobDataHashSize() { return kSizetSize; }
+ static constexpr uint32_t EmbeddedBlobCodeHashOffset() {
+ return EmbeddedBlobDataHashOffset() + EmbeddedBlobDataHashSize();
+ }
+ static constexpr uint32_t EmbeddedBlobCodeHashSize() { return kSizetSize; }
static constexpr uint32_t IsolateHashOffset() {
- return EmbeddedBlobHashOffset() + EmbeddedBlobHashSize();
+ return EmbeddedBlobCodeHashOffset() + EmbeddedBlobCodeHashSize();
}
static constexpr uint32_t IsolateHashSize() { return kSizetSize; }
- static constexpr uint32_t MetadataTableOffset() {
+ static constexpr uint32_t LayoutDescriptionTableOffset() {
return IsolateHashOffset() + IsolateHashSize();
}
- static constexpr uint32_t MetadataTableSize() {
- return sizeof(struct Metadata) * kTableSize;
+ static constexpr uint32_t LayoutDescriptionTableSize() {
+ return sizeof(struct LayoutDescription) * kTableSize;
+ }
+ static constexpr uint32_t FixedDataSize() {
+ return LayoutDescriptionTableOffset() + LayoutDescriptionTableSize();
}
+ // The variable-size data section starts here.
+ static constexpr uint32_t RawMetadataOffset() { return FixedDataSize(); }
+
+ // Code is in its own dedicated section.
static constexpr uint32_t RawCodeOffset() { return 0; }
private:
- EmbeddedData(const uint8_t* code, uint32_t code_size, const uint8_t* metadata,
- uint32_t metadata_size)
- : code_(code),
- code_size_(code_size),
- metadata_(metadata),
- metadata_size_(metadata_size) {
+ EmbeddedData(const uint8_t* code, uint32_t code_size, const uint8_t* data,
+ uint32_t data_size)
+ : code_(code), code_size_(code_size), data_(data), data_size_(data_size) {
DCHECK_NOT_NULL(code);
DCHECK_LT(0, code_size);
- DCHECK_NOT_NULL(metadata);
- DCHECK_LT(0, metadata_size);
+ DCHECK_NOT_NULL(data);
+ DCHECK_LT(0, data_size);
}
- const Metadata* Metadata() const {
- return reinterpret_cast<const struct Metadata*>(metadata_ +
- MetadataTableOffset());
- }
const uint8_t* RawCode() const { return code_ + RawCodeOffset(); }
- static constexpr int PadAndAlign(int size) {
+ const LayoutDescription* LayoutDescription() const {
+ return reinterpret_cast<const struct LayoutDescription*>(
+ data_ + LayoutDescriptionTableOffset());
+ }
+ const uint8_t* RawMetadata() const { return data_ + RawMetadataOffset(); }
+
+ static constexpr int PadAndAlignCode(int size) {
// Ensure we have at least one byte trailing the actual builtin
// instructions which we can later fill with int3.
return RoundUp<kCodeAlignment>(size + 1);
}
+ static constexpr int PadAndAlignData(int size) {
+ // Pad the metadata area to Code::kMetadataAlignment. Unlike the code
+ // section, no trailing int3 byte is reserved here.
+ return RoundUp<Code::kMetadataAlignment>(size);
+ }
void PrintStatistics() const;
- // This points to code for builtins. The contents are potentially unreadable
- // on platforms that disallow reads from the .text section.
+ // The code section contains instruction streams. It is guaranteed to have
+ // execute permissions, and may have read permissions.
const uint8_t* code_;
uint32_t code_size_;
- // This is metadata for the code.
- const uint8_t* metadata_;
- uint32_t metadata_size_;
+ // The data section contains both descriptions of the code section (hashes,
+ // offsets, sizes) and metadata describing Code objects (see
+ // Code::MetadataStart()). It is guaranteed to have read permissions.
+ const uint8_t* data_;
+ uint32_t data_size_;
};
} // namespace internal
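Spelled out with illustrative constants, the fixed part of the data section described above is just a chain of offsets (standalone sketch, not part of the patch; kTableSize and the size constants are assumptions, not V8's values).

#include <cstdint>

// Sketch only: the fixed data-section layout implied by the constexpr offset
// helpers above. All constants are illustrative stand-ins.
constexpr uint32_t kSizetSize = 8;               // sizeof(size_t) on 64-bit.
constexpr uint32_t kTableSize = 1600;            // Stand-in for builtin_count.
constexpr uint32_t kLayoutDescriptionSize = 16;  // 4 * kUInt32Size.

constexpr uint32_t DataHashOffset() { return 0; }
constexpr uint32_t CodeHashOffset() { return DataHashOffset() + kSizetSize; }
constexpr uint32_t IsolateHashOffset() { return CodeHashOffset() + kSizetSize; }
constexpr uint32_t LayoutTableOffset() {
  return IsolateHashOffset() + kSizetSize;
}
constexpr uint32_t FixedDataSize() {
  return LayoutTableOffset() + kTableSize * kLayoutDescriptionSize;
}
// The variable-size per-builtin metadata areas start right after this.
constexpr uint32_t RawMetadataOffset() { return FixedDataSize(); }

static_assert(RawMetadataOffset() ==
                  3 * kSizetSize + kTableSize * kLayoutDescriptionSize,
              "fixed header is three hashes plus the layout table");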
diff --git a/deps/v8/src/snapshot/embedded/embedded-empty.cc b/deps/v8/src/snapshot/embedded/embedded-empty.cc
index a407f904ed..c32b459d9d 100644
--- a/deps/v8/src/snapshot/embedded/embedded-empty.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-empty.cc
@@ -10,22 +10,22 @@
extern "C" const uint8_t* v8_Default_embedded_blob_code_;
extern "C" uint32_t v8_Default_embedded_blob_code_size_;
-extern "C" const uint8_t* v8_Default_embedded_blob_metadata_;
-extern "C" uint32_t v8_Default_embedded_blob_metadata_size_;
+extern "C" const uint8_t* v8_Default_embedded_blob_data_;
+extern "C" uint32_t v8_Default_embedded_blob_data_size_;
const uint8_t* v8_Default_embedded_blob_code_ = nullptr;
uint32_t v8_Default_embedded_blob_code_size_ = 0;
-const uint8_t* v8_Default_embedded_blob_metadata_ = nullptr;
-uint32_t v8_Default_embedded_blob_metadata_size_ = 0;
+const uint8_t* v8_Default_embedded_blob_data_ = nullptr;
+uint32_t v8_Default_embedded_blob_data_size_ = 0;
#ifdef V8_MULTI_SNAPSHOTS
extern "C" const uint8_t* v8_Trusted_embedded_blob_code_;
extern "C" uint32_t v8_Trusted_embedded_blob_code_size_;
-extern "C" const uint8_t* v8_Trusted_embedded_blob_metadata_;
-extern "C" uint32_t v8_Trusted_embedded_blob_metadata_size_;
+extern "C" const uint8_t* v8_Trusted_embedded_blob_data_;
+extern "C" uint32_t v8_Trusted_embedded_blob_data_size_;
const uint8_t* v8_Trusted_embedded_blob_code_ = nullptr;
uint32_t v8_Trusted_embedded_blob_code_size_ = 0;
-const uint8_t* v8_Trusted_embedded_blob_metadata_ = nullptr;
-uint32_t v8_Trusted_embedded_blob_metadata_size_ = 0;
+const uint8_t* v8_Trusted_embedded_blob_data_ = nullptr;
+uint32_t v8_Trusted_embedded_blob_data_size_ = 0;
#endif
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
index 8a3e248d6b..b472841cc6 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
@@ -14,6 +14,38 @@
namespace v8 {
namespace internal {
+namespace {
+
+int WriteDirectiveOrSeparator(PlatformEmbeddedFileWriterBase* w,
+ int current_line_length,
+ DataDirective directive) {
+ int printed_chars;
+ if (current_line_length == 0) {
+ printed_chars = w->IndentedDataDirective(directive);
+ DCHECK_LT(0, printed_chars);
+ } else {
+ printed_chars = fprintf(w->fp(), ",");
+ DCHECK_EQ(1, printed_chars);
+ }
+ return current_line_length + printed_chars;
+}
+
+int WriteLineEndIfNeeded(PlatformEmbeddedFileWriterBase* w,
+ int current_line_length, int write_size) {
+ static const int kTextWidth = 100;
+ // Check if adding ',0xFF...FF\n"' would force a line wrap. This doesn't use
+ // the actual size of the string to be written to determine this so it's
+ // more conservative than strictly needed.
+ if (current_line_length + strlen(",0x") + write_size * 2 > kTextWidth) {
+ fprintf(w->fp(), "\n");
+ return 0;
+ } else {
+ return current_line_length;
+ }
+}
+
+} // namespace
+
void EmbeddedFileWriter::WriteBuiltin(PlatformEmbeddedFileWriterBase* w,
const i::EmbeddedData* blob,
const int builtin_id) const {
@@ -98,6 +130,40 @@ void EmbeddedFileWriter::WriteBuiltinLabels(PlatformEmbeddedFileWriterBase* w,
w->DeclareLabel(name.c_str());
}
+void EmbeddedFileWriter::WriteCodeSection(PlatformEmbeddedFileWriterBase* w,
+ const i::EmbeddedData* blob) const {
+ w->Comment(
+ "The embedded blob code section starts here. It contains the builtin");
+ w->Comment("instruction streams.");
+ w->SectionText();
+
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+ // UMA needs an exposed function-type label at the start of the embedded
+ // code section.
+ static const char* kCodeStartForProfilerSymbolName =
+ "v8_code_start_for_profiler_";
+ static constexpr int kDummyFunctionLength = 1;
+ static constexpr int kDummyFunctionData = 0xcc;
+ w->DeclareFunctionBegin(kCodeStartForProfilerSymbolName,
+ kDummyFunctionLength);
+ // The label must not be at the same address as the first builtin; insert
+ // padding bytes.
+ WriteDirectiveOrSeparator(w, 0, kByte);
+ w->HexLiteral(kDummyFunctionData);
+ w->Newline();
+ w->DeclareFunctionEnd(kCodeStartForProfilerSymbolName);
+#endif
+
+ w->AlignToCodeAlignment();
+ w->DeclareLabel(EmbeddedBlobCodeDataSymbol().c_str());
+
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
+ for (int i = 0; i < i::Builtins::builtin_count; i++) {
+ WriteBuiltin(w, blob, i);
+ }
+ w->Newline();
+}
+
void EmbeddedFileWriter::WriteFileEpilogue(PlatformEmbeddedFileWriterBase* w,
const i::EmbeddedData* blob) const {
{
@@ -112,15 +178,14 @@ void EmbeddedFileWriter::WriteFileEpilogue(PlatformEmbeddedFileWriterBase* w,
EmbeddedBlobCodeDataSymbol().c_str());
w->Newline();
- i::EmbeddedVector<char, kTemporaryStringLength>
- embedded_blob_metadata_symbol;
- i::SNPrintF(embedded_blob_metadata_symbol, "v8_%s_embedded_blob_metadata_",
+ i::EmbeddedVector<char, kTemporaryStringLength> embedded_blob_data_symbol;
+ i::SNPrintF(embedded_blob_data_symbol, "v8_%s_embedded_blob_data_",
embedded_variant_);
- w->Comment("Pointer to the beginning of the embedded blob metadata.");
+ w->Comment("Pointer to the beginning of the embedded blob data section.");
w->AlignToDataAlignment();
- w->DeclarePointerToSymbol(embedded_blob_metadata_symbol.begin(),
- EmbeddedBlobMetadataDataSymbol().c_str());
+ w->DeclarePointerToSymbol(embedded_blob_data_symbol.begin(),
+ EmbeddedBlobDataDataSymbol().c_str());
w->Newline();
}
@@ -137,13 +202,12 @@ void EmbeddedFileWriter::WriteFileEpilogue(PlatformEmbeddedFileWriterBase* w,
w->Newline();
i::EmbeddedVector<char, kTemporaryStringLength>
- embedded_blob_metadata_size_symbol;
- i::SNPrintF(embedded_blob_metadata_size_symbol,
- "v8_%s_embedded_blob_metadata_size_", embedded_variant_);
+ embedded_blob_data_size_symbol;
+ i::SNPrintF(embedded_blob_data_size_symbol,
+ "v8_%s_embedded_blob_data_size_", embedded_variant_);
- w->Comment("The size of the embedded blob metadata in bytes.");
- w->DeclareUint32(embedded_blob_metadata_size_symbol.begin(),
- blob->metadata_size());
+ w->Comment("The size of the embedded blob data section in bytes.");
+ w->DeclareUint32(embedded_blob_data_size_symbol.begin(), blob->data_size());
w->Newline();
}
@@ -162,38 +226,6 @@ void EmbeddedFileWriter::WriteFileEpilogue(PlatformEmbeddedFileWriterBase* w,
w->FileEpilogue();
}
-namespace {
-
-int WriteDirectiveOrSeparator(PlatformEmbeddedFileWriterBase* w,
- int current_line_length,
- DataDirective directive) {
- int printed_chars;
- if (current_line_length == 0) {
- printed_chars = w->IndentedDataDirective(directive);
- DCHECK_LT(0, printed_chars);
- } else {
- printed_chars = fprintf(w->fp(), ",");
- DCHECK_EQ(1, printed_chars);
- }
- return current_line_length + printed_chars;
-}
-
-int WriteLineEndIfNeeded(PlatformEmbeddedFileWriterBase* w,
- int current_line_length, int write_size) {
- static const int kTextWidth = 100;
- // Check if adding ',0xFF...FF\n"' would force a line wrap. This doesn't use
- // the actual size of the string to be written to determine this so it's
- // more conservative than strictly needed.
- if (current_line_length + strlen(",0x") + write_size * 2 > kTextWidth) {
- fprintf(w->fp(), "\n");
- return 0;
- } else {
- return current_line_length;
- }
-}
-
-} // namespace
-
// static
void EmbeddedFileWriter::WriteBinaryContentsAsInlineAssembly(
PlatformEmbeddedFileWriterBase* w, const uint8_t* data, uint32_t size) {
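For reference, a standalone sketch (not part of the patch) of how the two helpers above combine when a byte stream is written as inline-assembly data; the directive name and width handling are simplified.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Sketch only: emit bytes as ".byte 0x.." directives, wrapping lines near a
// fixed width, in the spirit of WriteDirectiveOrSeparator and
// WriteLineEndIfNeeded above.
void EmitBytes(FILE* fp, const uint8_t* data, uint32_t size) {
  static const int kTextWidth = 100;
  int current_line_length = 0;
  for (uint32_t i = 0; i < size; i++) {
    if (current_line_length == 0) {
      current_line_length += fprintf(fp, "  .byte");
    } else {
      current_line_length += fprintf(fp, ",");
    }
    current_line_length += fprintf(fp, " 0x%02x", data[i]);
    // Conservative wrap check, like WriteLineEndIfNeeded: assume the next
    // value needs the worst-case width.
    if (current_line_length + static_cast<int>(strlen(",0x")) + 2 > kTextWidth) {
      fprintf(fp, "\n");
      current_line_length = 0;
    }
  }
  if (current_line_length != 0) fprintf(fp, "\n");
}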
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.h b/deps/v8/src/snapshot/embedded/embedded-file-writer.h
index f1ca04170a..6a90e5c685 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer.h
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.h
@@ -109,8 +109,8 @@ class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
WriteFilePrologue(writer.get());
WriteExternalFilenames(writer.get());
- WriteMetadataSection(writer.get(), blob);
- WriteInstructionStreams(writer.get(), blob);
+ WriteDataSection(writer.get(), blob);
+ WriteCodeSection(writer.get(), blob);
WriteFileEpilogue(writer.get(), blob);
fclose(fp);
@@ -161,23 +161,22 @@ class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
return std::string{embedded_blob_code_data_symbol.begin()};
}
- std::string EmbeddedBlobMetadataDataSymbol() const {
+ std::string EmbeddedBlobDataDataSymbol() const {
i::EmbeddedVector<char, kTemporaryStringLength>
- embedded_blob_metadata_data_symbol;
- i::SNPrintF(embedded_blob_metadata_data_symbol,
- "v8_%s_embedded_blob_metadata_data_", embedded_variant_);
- return std::string{embedded_blob_metadata_data_symbol.begin()};
+ embedded_blob_data_data_symbol;
+ i::SNPrintF(embedded_blob_data_data_symbol,
+ "v8_%s_embedded_blob_data_data_", embedded_variant_);
+ return std::string{embedded_blob_data_data_symbol.begin()};
}
- void WriteMetadataSection(PlatformEmbeddedFileWriterBase* w,
- const i::EmbeddedData* blob) const {
- w->Comment("The embedded blob metadata starts here.");
+ void WriteDataSection(PlatformEmbeddedFileWriterBase* w,
+ const i::EmbeddedData* blob) const {
+ w->Comment("The embedded blob data section starts here.");
w->SectionRoData();
w->AlignToDataAlignment();
- w->DeclareLabel(EmbeddedBlobMetadataDataSymbol().c_str());
+ w->DeclareLabel(EmbeddedBlobDataDataSymbol().c_str());
- WriteBinaryContentsAsInlineAssembly(w, blob->metadata(),
- blob->metadata_size());
+ WriteBinaryContentsAsInlineAssembly(w, blob->data(), blob->data_size());
}
void WriteBuiltin(PlatformEmbeddedFileWriterBase* w,
@@ -186,21 +185,8 @@ class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
void WriteBuiltinLabels(PlatformEmbeddedFileWriterBase* w,
std::string name) const;
- void WriteInstructionStreams(PlatformEmbeddedFileWriterBase* w,
- const i::EmbeddedData* blob) const {
- w->Comment("The embedded blob data starts here. It contains the builtin");
- w->Comment("instruction streams.");
- w->SectionText();
- w->AlignToCodeAlignment();
- w->DeclareLabel(EmbeddedBlobCodeDataSymbol().c_str());
-
- for (int i = 0; i < i::Builtins::builtin_count; i++) {
- if (!blob->ContainsBuiltin(i)) continue;
-
- WriteBuiltin(w, blob, i);
- }
- w->Newline();
- }
+ void WriteCodeSection(PlatformEmbeddedFileWriterBase* w,
+ const i::EmbeddedData* blob) const;
void WriteFileEpilogue(PlatformEmbeddedFileWriterBase* w,
const i::EmbeddedData* blob) const;
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
index 1c823ef421..4065e4a7eb 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
@@ -4,6 +4,8 @@
#include "src/snapshot/embedded/platform-embedded-file-writer-aix.h"
+#include "src/objects/code.h"
+
namespace v8 {
namespace internal {
@@ -63,10 +65,12 @@ void PlatformEmbeddedFileWriterAIX::DeclareSymbolGlobal(const char* name) {
}
void PlatformEmbeddedFileWriterAIX::AlignToCodeAlignment() {
+ STATIC_ASSERT((1 << 5) >= kCodeAlignment);
fprintf(fp_, ".align 5\n");
}
void PlatformEmbeddedFileWriterAIX::AlignToDataAlignment() {
+ STATIC_ASSERT((1 << 3) >= Code::kMetadataAlignment);
fprintf(fp_, ".align 3\n");
}
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
index 070aaf51b6..8acfd0d176 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
@@ -8,6 +8,7 @@
#include <cinttypes>
#include "src/common/globals.h"
+#include "src/objects/code.h"
namespace v8 {
namespace internal {
@@ -73,6 +74,7 @@ void PlatformEmbeddedFileWriterGeneric::DeclareSymbolGlobal(const char* name) {
}
void PlatformEmbeddedFileWriterGeneric::AlignToCodeAlignment() {
+ STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".balign 32\n");
}
@@ -81,6 +83,7 @@ void PlatformEmbeddedFileWriterGeneric::AlignToDataAlignment() {
// instructions are used to retrieve v8_Default_embedded_blob_ and/or
// v8_Default_embedded_blob_size_. The generated instructions require the
// load target to be aligned at 8 bytes (2^3).
+ STATIC_ASSERT(8 >= Code::kMetadataAlignment);
fprintf(fp_, ".balign 8\n");
}
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
index 9c5bf7049d..5fa12ec6ea 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
@@ -4,6 +4,8 @@
#include "src/snapshot/embedded/platform-embedded-file-writer-mac.h"
+#include "src/objects/code.h"
+
namespace v8 {
namespace internal {
@@ -54,6 +56,7 @@ void PlatformEmbeddedFileWriterMac::DeclareSymbolGlobal(const char* name) {
// prevents something along the compilation chain from messing with the
// embedded blob. Using .global here causes embedded blob hash verification
// failures at runtime.
+ STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".private_extern _%s\n", name);
}
@@ -62,6 +65,7 @@ void PlatformEmbeddedFileWriterMac::AlignToCodeAlignment() {
}
void PlatformEmbeddedFileWriterMac::AlignToDataAlignment() {
+ STATIC_ASSERT(8 >= Code::kMetadataAlignment);
fprintf(fp_, ".balign 8\n");
}
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
index e3250084c4..891dbd94d3 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
@@ -109,12 +109,12 @@ void EmitUnwindData(PlatformEmbeddedFileWriterWin* w,
w->Comment(" UnwindInfoAddress");
w->StartPdataSection();
{
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
Address prev_builtin_end_offset = 0;
for (int i = 0; i < Builtins::builtin_count; i++) {
// Some builtins are leaf functions from the point of view of Win64 stack
// walking: they do not move the stack pointer and do not require a PDATA
// entry because the return address can be retrieved from [rsp].
- if (!blob->ContainsBuiltin(i)) continue;
if (unwind_infos[i].is_leaf_function()) continue;
uint64_t builtin_start_offset = blob->InstructionStartOfBuiltin(i) -
@@ -193,8 +193,8 @@ void EmitUnwindData(PlatformEmbeddedFileWriterWin* w,
std::vector<int> code_chunks;
std::vector<win64_unwindinfo::FrameOffsets> fp_adjustments;
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (int i = 0; i < Builtins::builtin_count; i++) {
- if (!blob->ContainsBuiltin(i)) continue;
if (unwind_infos[i].is_leaf_function()) continue;
uint64_t builtin_start_offset = blob->InstructionStartOfBuiltin(i) -
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index e8bc45a4a2..5747f705ae 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -15,18 +15,20 @@
namespace v8 {
namespace internal {
-ObjectDeserializer::ObjectDeserializer(const SerializedCodeData* data)
- : Deserializer(data, true) {}
+ObjectDeserializer::ObjectDeserializer(Isolate* isolate,
+ const SerializedCodeData* data)
+ : Deserializer(isolate, data->Payload(), data->GetMagicNumber(), true,
+ false) {}
MaybeHandle<SharedFunctionInfo>
ObjectDeserializer::DeserializeSharedFunctionInfo(
Isolate* isolate, const SerializedCodeData* data, Handle<String> source) {
- ObjectDeserializer d(data);
+ ObjectDeserializer d(isolate, data);
d.AddAttachedObject(source);
Handle<HeapObject> result;
- return d.Deserialize(isolate).ToHandle(&result)
+ return d.Deserialize().ToHandle(&result)
? Handle<SharedFunctionInfo>::cast(result)
: MaybeHandle<SharedFunctionInfo>();
}
@@ -39,23 +41,17 @@ ObjectDeserializer::DeserializeSharedFunctionInfoOffThread(
UNREACHABLE();
}
-MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
- Initialize(isolate);
- if (!allocator()->ReserveSpace()) return MaybeHandle<HeapObject>();
-
+MaybeHandle<HeapObject> ObjectDeserializer::Deserialize() {
DCHECK(deserializing_user_code());
- HandleScope scope(isolate);
+ HandleScope scope(isolate());
Handle<HeapObject> result;
{
- DisallowGarbageCollection no_gc;
- Object root;
- VisitRootPointer(Root::kStartupObjectCache, nullptr, FullObjectSlot(&root));
+ result = ReadObject();
DeserializeDeferredObjects();
CHECK(new_code_objects().empty());
LinkAllocationSites();
- LogNewMapEvents();
- result = handle(HeapObject::cast(root), isolate);
- allocator()->RegisterDeserializedObjectsForBlackAllocation();
+ CHECK(new_maps().empty());
+ WeakenDescriptorArrays();
}
Rehash();
@@ -77,10 +73,10 @@ void ObjectDeserializer::CommitPostProcessedObjects() {
script->set_id(isolate()->GetNextScriptId());
LogScriptEvents(*script);
// Add script to list.
- Handle<WeakArrayList> list = isolate()->factory()->script_list();
- list = WeakArrayList::AddToEnd(isolate(), list,
- MaybeObjectHandle::Weak(script));
- isolate()->heap()->SetRootScriptList(*list);
+ Handle<WeakArrayList> list = isolate()->factory()->script_list();
+ list = WeakArrayList::AddToEnd(isolate(), list,
+ MaybeObjectHandle::Weak(script));
+ isolate()->heap()->SetRootScriptList(*list);
}
}
@@ -89,17 +85,17 @@ void ObjectDeserializer::LinkAllocationSites() {
Heap* heap = isolate()->heap();
// Allocation sites are present in the snapshot, and must be linked into
// a list at deserialization time.
- for (AllocationSite site : new_allocation_sites()) {
- if (!site.HasWeakNext()) continue;
+ for (Handle<AllocationSite> site : new_allocation_sites()) {
+ if (!site->HasWeakNext()) continue;
// TODO(mvstanton): consider treating the heap()->allocation_sites_list()
// as a (weak) root. If this root is relocated correctly, this becomes
// unnecessary.
if (heap->allocation_sites_list() == Smi::zero()) {
- site.set_weak_next(ReadOnlyRoots(heap).undefined_value());
+ site->set_weak_next(ReadOnlyRoots(heap).undefined_value());
} else {
- site.set_weak_next(heap->allocation_sites_list());
+ site->set_weak_next(heap->allocation_sites_list());
}
- heap->set_allocation_sites_list(site);
+ heap->set_allocation_sites_list(*site);
}
}
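A simplified standalone sketch (not part of the patch) of the linking done in LinkAllocationSites above: each freshly deserialized site is prepended to the heap's intrusive allocation-sites list.

#include <vector>

// Sketch only: prepend nodes onto an intrusive singly-linked list head, the
// way LinkAllocationSites chains sites through their weak_next field.
struct Site {
  Site* weak_next = nullptr;  // Stand-in for AllocationSite::weak_next.
};

void LinkSites(Site*& list_head, const std::vector<Site*>& new_sites) {
  for (Site* site : new_sites) {
    site->weak_next = list_head;  // Previous head, or nullptr ("undefined").
    list_head = site;             // The new site becomes the head.
  }
}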
diff --git a/deps/v8/src/snapshot/object-deserializer.h b/deps/v8/src/snapshot/object-deserializer.h
index f155ca8d07..6ba79147f5 100644
--- a/deps/v8/src/snapshot/object-deserializer.h
+++ b/deps/v8/src/snapshot/object-deserializer.h
@@ -23,10 +23,10 @@ class ObjectDeserializer final : public Deserializer {
Handle<String> source);
private:
- explicit ObjectDeserializer(const SerializedCodeData* data);
+ explicit ObjectDeserializer(Isolate* isolate, const SerializedCodeData* data);
// Deserialize an object graph. Fail gracefully.
- MaybeHandle<HeapObject> Deserialize(Isolate* isolate);
+ MaybeHandle<HeapObject> Deserialize();
void LinkAllocationSites();
void CommitPostProcessedObjects();
diff --git a/deps/v8/src/snapshot/read-only-deserializer.cc b/deps/v8/src/snapshot/read-only-deserializer.cc
index 7d1ff90b8c..c8a6651eb7 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.cc
+++ b/deps/v8/src/snapshot/read-only-deserializer.cc
@@ -14,29 +14,24 @@
namespace v8 {
namespace internal {
-void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
- Initialize(isolate);
+void ReadOnlyDeserializer::DeserializeIntoIsolate() {
+ HandleScope scope(isolate());
- if (!allocator()->ReserveSpace()) {
- V8::FatalProcessOutOfMemory(isolate, "ReadOnlyDeserializer");
- }
-
- ReadOnlyHeap* ro_heap = isolate->read_only_heap();
+ ReadOnlyHeap* ro_heap = isolate()->read_only_heap();
// No active threads.
- DCHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
+ DCHECK_NULL(isolate()->thread_manager()->FirstThreadStateInUse());
// No active handles.
- DCHECK(isolate->handle_scope_implementer()->blocks()->empty());
+ DCHECK(isolate()->handle_scope_implementer()->blocks()->empty());
// Read-only object cache is not yet populated.
DCHECK(!ro_heap->read_only_object_cache_is_initialized());
// Startup object cache is not yet populated.
- DCHECK(isolate->startup_object_cache()->empty());
+ DCHECK(isolate()->startup_object_cache()->empty());
// Builtins are not yet created.
- DCHECK(!isolate->builtins()->is_initialized());
+ DCHECK(!isolate()->builtins()->is_initialized());
{
- DisallowGarbageCollection no_gc;
- ReadOnlyRoots roots(isolate);
+ ReadOnlyRoots roots(isolate());
roots.Iterate(this);
ro_heap->read_only_space()->RepairFreeSpacesAfterDeserialization();
@@ -55,7 +50,7 @@ void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
}
if (FLAG_rehash_snapshot && can_rehash()) {
- isolate->heap()->InitializeHashSeed();
+ isolate()->heap()->InitializeHashSeed();
Rehash();
}
}
diff --git a/deps/v8/src/snapshot/read-only-deserializer.h b/deps/v8/src/snapshot/read-only-deserializer.h
index 08443766c2..c546c234ff 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.h
+++ b/deps/v8/src/snapshot/read-only-deserializer.h
@@ -6,6 +6,7 @@
#define V8_SNAPSHOT_READ_ONLY_DESERIALIZER_H_
#include "src/snapshot/deserializer.h"
+#include "src/snapshot/snapshot-data.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -15,11 +16,13 @@ namespace internal {
// Read-only object cache used by the other deserializers.
class ReadOnlyDeserializer final : public Deserializer {
public:
- explicit ReadOnlyDeserializer(const SnapshotData* data)
- : Deserializer(data, false) {}
+ explicit ReadOnlyDeserializer(Isolate* isolate, const SnapshotData* data,
+ bool can_rehash)
+ : Deserializer(isolate, data->Payload(), data->GetMagicNumber(), false,
+ can_rehash) {}
// Deserialize the snapshot into an empty heap.
- void DeserializeInto(Isolate* isolate);
+ void DeserializeIntoIsolate();
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/read-only-serializer.cc b/deps/v8/src/snapshot/read-only-serializer.cc
index 4b852c0656..06c5094782 100644
--- a/deps/v8/src/snapshot/read-only-serializer.cc
+++ b/deps/v8/src/snapshot/read-only-serializer.cc
@@ -18,32 +18,51 @@ namespace internal {
ReadOnlySerializer::ReadOnlySerializer(Isolate* isolate,
Snapshot::SerializerFlags flags)
- : RootsSerializer(isolate, flags, RootIndex::kFirstReadOnlyRoot) {
+ : RootsSerializer(isolate, flags, RootIndex::kFirstReadOnlyRoot)
+#ifdef DEBUG
+ ,
+ serialized_objects_(isolate->heap()),
+ did_serialize_not_mapped_symbol_(false)
+#endif
+{
STATIC_ASSERT(RootIndex::kFirstReadOnlyRoot == RootIndex::kFirstRoot);
- allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
}
ReadOnlySerializer::~ReadOnlySerializer() {
OutputStatistics("ReadOnlySerializer");
}
-void ReadOnlySerializer::SerializeObject(HeapObject obj) {
- CHECK(ReadOnlyHeap::Contains(obj));
- CHECK_IMPLIES(obj.IsString(), obj.IsInternalizedString());
-
- if (SerializeHotObject(obj)) return;
- if (IsRootAndHasBeenSerialized(obj) && SerializeRoot(obj)) {
- return;
+void ReadOnlySerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
+ CHECK(ReadOnlyHeap::Contains(*obj));
+ CHECK_IMPLIES(obj->IsString(), obj->IsInternalizedString());
+
+ // There should be no references to the not_mapped_symbol except for the entry
+ // in the root table, so don't try to serialize a reference and rely on the
+ // below CHECK(!did_serialize_not_mapped_symbol_) to make sure it doesn't
+ // serialize twice.
+ if (*obj != ReadOnlyRoots(isolate()).not_mapped_symbol()) {
+ if (SerializeHotObject(obj)) return;
+ if (IsRootAndHasBeenSerialized(*obj) && SerializeRoot(obj)) {
+ return;
+ }
+ if (SerializeBackReference(obj)) return;
}
- if (SerializeBackReference(obj)) return;
- CheckRehashability(obj);
+ CheckRehashability(*obj);
// Object has not yet been serialized. Serialize it here.
ObjectSerializer object_serializer(this, obj, &sink_);
object_serializer.Serialize();
#ifdef DEBUG
- serialized_objects_.insert(obj);
+ if (*obj == ReadOnlyRoots(isolate()).not_mapped_symbol()) {
+ CHECK(!did_serialize_not_mapped_symbol_);
+ did_serialize_not_mapped_symbol_ = true;
+ } else {
+ CHECK_NULL(serialized_objects_.Find(obj));
+ // There's no "IdentitySet", so use an IdentityMap with a value that is
+ // later ignored.
+ serialized_objects_.Insert(obj, 0);
+ }
#endif
}
@@ -73,7 +92,11 @@ void ReadOnlySerializer::FinalizeSerialization() {
ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
- CHECK(serialized_objects_.count(object));
+ if (object == ReadOnlyRoots(isolate()).not_mapped_symbol()) {
+ CHECK(did_serialize_not_mapped_symbol_);
+ } else {
+ CHECK_NOT_NULL(serialized_objects_.Find(object));
+ }
}
#endif
}
@@ -92,8 +115,8 @@ bool ReadOnlySerializer::MustBeDeferred(HeapObject object) {
}
bool ReadOnlySerializer::SerializeUsingReadOnlyObjectCache(
- SnapshotByteSink* sink, HeapObject obj) {
- if (!ReadOnlyHeap::Contains(obj)) return false;
+ SnapshotByteSink* sink, Handle<HeapObject> obj) {
+ if (!ReadOnlyHeap::Contains(*obj)) return false;
// Get the cache index and serialize it into the read-only snapshot if
// necessary.
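The DEBUG-only bookkeeping above uses an IdentityMap as a set by storing a value that is never read; here is a minimal standalone sketch of the same idea (not part of the patch, using standard containers rather than V8's IdentityMap).

#include <cassert>
#include <unordered_map>

// Sketch only: a pointer-keyed map doubling as an identity set, mirroring
// serialized_objects_.Insert(obj, 0) and serialized_objects_.Find(obj).
class IdentitySet {
 public:
  void Insert(const void* object) {
    bool inserted = set_.emplace(object, 0).second;
    assert(inserted);  // Each object may be recorded at most once.
    (void)inserted;
  }
  bool Contains(const void* object) const { return set_.count(object) > 0; }

 private:
  std::unordered_map<const void*, int> set_;
};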
diff --git a/deps/v8/src/snapshot/read-only-serializer.h b/deps/v8/src/snapshot/read-only-serializer.h
index f30b2c30ba..fd88b9f7b6 100644
--- a/deps/v8/src/snapshot/read-only-serializer.h
+++ b/deps/v8/src/snapshot/read-only-serializer.h
@@ -7,6 +7,7 @@
#include <unordered_set>
+#include "src/base/hashmap.h"
#include "src/snapshot/roots-serializer.h"
namespace v8 {
@@ -19,6 +20,8 @@ class V8_EXPORT_PRIVATE ReadOnlySerializer : public RootsSerializer {
public:
ReadOnlySerializer(Isolate* isolate, Snapshot::SerializerFlags flags);
~ReadOnlySerializer() override;
+ ReadOnlySerializer(const ReadOnlySerializer&) = delete;
+ ReadOnlySerializer& operator=(const ReadOnlySerializer&) = delete;
void SerializeReadOnlyRoots();
@@ -31,16 +34,16 @@ class V8_EXPORT_PRIVATE ReadOnlySerializer : public RootsSerializer {
// ReadOnlyObjectCache bytecode into |sink|. Returns whether this was
// successful.
bool SerializeUsingReadOnlyObjectCache(SnapshotByteSink* sink,
- HeapObject obj);
+ Handle<HeapObject> obj);
private:
- void SerializeObject(HeapObject o) override;
+ void SerializeObjectImpl(Handle<HeapObject> o) override;
bool MustBeDeferred(HeapObject object) override;
#ifdef DEBUG
- std::unordered_set<HeapObject, Object::Hasher> serialized_objects_;
+ IdentityMap<int, base::DefaultAllocationPolicy> serialized_objects_;
+ bool did_serialize_not_mapped_symbol_;
#endif
- DISALLOW_COPY_AND_ASSIGN(ReadOnlySerializer);
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/references.h b/deps/v8/src/snapshot/references.h
index eed4def1ef..ecaedc41d4 100644
--- a/deps/v8/src/snapshot/references.h
+++ b/deps/v8/src/snapshot/references.h
@@ -8,78 +8,42 @@
#include "src/base/bit-field.h"
#include "src/base/hashmap.h"
#include "src/common/assert-scope.h"
+#include "src/execution/isolate.h"
+#include "src/utils/identity-map.h"
namespace v8 {
namespace internal {
-// TODO(goszczycki): Move this somewhere every file in src/snapshot can use it.
-// The spaces suported by the serializer. Spaces after LO_SPACE (NEW_LO_SPACE
-// and CODE_LO_SPACE) are not supported.
enum class SnapshotSpace : byte {
- kReadOnlyHeap = RO_SPACE,
- kOld = OLD_SPACE,
- kCode = CODE_SPACE,
- kMap = MAP_SPACE,
- kLargeObject = LO_SPACE,
- kNumberOfPreallocatedSpaces = kCode + 1,
- kNumberOfSpaces = kLargeObject + 1,
- kSpecialValueSpace = kNumberOfSpaces,
- // Number of spaces which should be allocated by the heap. Eventually
- // kReadOnlyHeap will move to the end of this enum and this will be equal to
- // it.
- kNumberOfHeapSpaces = kNumberOfSpaces,
+ kReadOnlyHeap,
+ kOld,
+ kCode,
+ kMap,
};
-
-constexpr bool IsPreAllocatedSpace(SnapshotSpace space) {
- return static_cast<int>(space) <
- static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
-}
+static constexpr int kNumberOfSnapshotSpaces =
+ static_cast<int>(SnapshotSpace::kMap) + 1;
class SerializerReference {
private:
enum SpecialValueType {
- kInvalidValue,
+ kBackReference,
kAttachedReference,
kOffHeapBackingStore,
kBuiltinReference,
};
- STATIC_ASSERT(static_cast<int>(SnapshotSpace::kSpecialValueSpace) <
- (1 << kSpaceTagSize));
-
SerializerReference(SpecialValueType type, uint32_t value)
- : bitfield_(SpaceBits::encode(SnapshotSpace::kSpecialValueSpace) |
- SpecialValueTypeBits::encode(type)),
- value_(value) {}
+ : bit_field_(TypeBits::encode(type) | ValueBits::encode(value)) {}
public:
- SerializerReference() : SerializerReference(kInvalidValue, 0) {}
-
- SerializerReference(SnapshotSpace space, uint32_t chunk_index,
- uint32_t chunk_offset)
- : bitfield_(SpaceBits::encode(space) |
- ChunkIndexBits::encode(chunk_index)),
- value_(chunk_offset) {}
-
- static SerializerReference BackReference(SnapshotSpace space,
- uint32_t chunk_index,
- uint32_t chunk_offset) {
- DCHECK(IsAligned(chunk_offset, kObjectAlignment));
- return SerializerReference(space, chunk_index, chunk_offset);
- }
-
- static SerializerReference MapReference(uint32_t index) {
- return SerializerReference(SnapshotSpace::kMap, 0, index);
+ static SerializerReference BackReference(uint32_t index) {
+ return SerializerReference(kBackReference, index);
}
static SerializerReference OffHeapBackingStoreReference(uint32_t index) {
return SerializerReference(kOffHeapBackingStore, index);
}
- static SerializerReference LargeObjectReference(uint32_t index) {
- return SerializerReference(SnapshotSpace::kLargeObject, 0, index);
- }
-
static SerializerReference AttachedReference(uint32_t index) {
return SerializerReference(kAttachedReference, index);
}
@@ -88,127 +52,94 @@ class SerializerReference {
return SerializerReference(kBuiltinReference, index);
}
- bool is_valid() const {
- return SpaceBits::decode(bitfield_) != SnapshotSpace::kSpecialValueSpace ||
- SpecialValueTypeBits::decode(bitfield_) != kInvalidValue;
- }
-
bool is_back_reference() const {
- return SpaceBits::decode(bitfield_) != SnapshotSpace::kSpecialValueSpace;
+ return TypeBits::decode(bit_field_) == kBackReference;
}
- SnapshotSpace space() const {
+ uint32_t back_ref_index() const {
DCHECK(is_back_reference());
- return SpaceBits::decode(bitfield_);
- }
-
- uint32_t chunk_offset() const {
- DCHECK(is_back_reference());
- return value_;
- }
-
- uint32_t chunk_index() const {
- DCHECK(IsPreAllocatedSpace(space()));
- return ChunkIndexBits::decode(bitfield_);
- }
-
- uint32_t map_index() const {
- DCHECK_EQ(SnapshotSpace::kMap, SpaceBits::decode(bitfield_));
- return value_;
+ return ValueBits::decode(bit_field_);
}
bool is_off_heap_backing_store_reference() const {
- return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
- SpecialValueTypeBits::decode(bitfield_) == kOffHeapBackingStore;
+ return TypeBits::decode(bit_field_) == kOffHeapBackingStore;
}
uint32_t off_heap_backing_store_index() const {
DCHECK(is_off_heap_backing_store_reference());
- return value_;
- }
-
- uint32_t large_object_index() const {
- DCHECK_EQ(SnapshotSpace::kLargeObject, SpaceBits::decode(bitfield_));
- return value_;
+ return ValueBits::decode(bit_field_);
}
bool is_attached_reference() const {
- return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
- SpecialValueTypeBits::decode(bitfield_) == kAttachedReference;
+ return TypeBits::decode(bit_field_) == kAttachedReference;
}
uint32_t attached_reference_index() const {
DCHECK(is_attached_reference());
- return value_;
+ return ValueBits::decode(bit_field_);
}
bool is_builtin_reference() const {
- return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
- SpecialValueTypeBits::decode(bitfield_) == kBuiltinReference;
+ return TypeBits::decode(bit_field_) == kBuiltinReference;
}
uint32_t builtin_index() const {
DCHECK(is_builtin_reference());
- return value_;
+ return ValueBits::decode(bit_field_);
}
private:
- using SpaceBits = base::BitField<SnapshotSpace, 0, kSpaceTagSize>;
- using ChunkIndexBits = SpaceBits::Next<uint32_t, 32 - kSpaceTagSize>;
- using SpecialValueTypeBits =
- SpaceBits::Next<SpecialValueType, 32 - kSpaceTagSize>;
-
- // We use two fields to store a reference.
- // In case of a normal back reference, the bitfield_ stores the space and
- // the chunk index. In case of special references, it uses a special value
- // for space and stores the special value type.
- uint32_t bitfield_;
- // value_ stores either chunk offset or special value.
- uint32_t value_;
+ using TypeBits = base::BitField<SpecialValueType, 0, 2>;
+ using ValueBits = TypeBits::Next<uint32_t, 32 - TypeBits::kSize>;
+
+ uint32_t bit_field_;
friend class SerializerReferenceMap;
};
-class SerializerReferenceMap
- : public base::TemplateHashMapImpl<uintptr_t, SerializerReference,
- base::KeyEqualityMatcher<intptr_t>,
- base::DefaultAllocationPolicy> {
+// SerializerReference has to fit in an IdentityMap value field.
+STATIC_ASSERT(sizeof(SerializerReference) <= sizeof(void*));
+
+class SerializerReferenceMap {
public:
- using Entry = base::TemplateHashMapEntry<uintptr_t, SerializerReference>;
+ explicit SerializerReferenceMap(Isolate* isolate)
+ : map_(isolate->heap()), attached_reference_index_(0) {}
+
+ const SerializerReference* LookupReference(HeapObject object) const {
+ return map_.Find(object);
+ }
+
+ const SerializerReference* LookupReference(Handle<HeapObject> object) const {
+ return map_.Find(object);
+ }
- SerializerReferenceMap() : attached_reference_index_(0) {}
+ const SerializerReference* LookupBackingStore(void* backing_store) const {
+ auto it = backing_store_map_.find(backing_store);
+ if (it == backing_store_map_.end()) return nullptr;
+ return &it->second;
+ }
- SerializerReference LookupReference(void* value) const {
- uintptr_t key = Key(value);
- Entry* entry = Lookup(key, Hash(key));
- if (entry == nullptr) return SerializerReference();
- return entry->value;
+ void Add(HeapObject object, SerializerReference reference) {
+ DCHECK_NULL(LookupReference(object));
+ map_.Insert(object, reference);
}
- void Add(void* obj, SerializerReference reference) {
- DCHECK(reference.is_valid());
- DCHECK(!LookupReference(obj).is_valid());
- uintptr_t key = Key(obj);
- LookupOrInsert(key, Hash(key))->value = reference;
+ void AddBackingStore(void* backing_store, SerializerReference reference) {
+ DCHECK(backing_store_map_.find(backing_store) == backing_store_map_.end());
+ backing_store_map_.emplace(backing_store, reference);
}
- SerializerReference AddAttachedReference(void* attached_reference) {
+ SerializerReference AddAttachedReference(HeapObject object) {
SerializerReference reference =
SerializerReference::AttachedReference(attached_reference_index_++);
- Add(attached_reference, reference);
+ map_.Insert(object, reference);
return reference;
}
private:
- static inline uintptr_t Key(void* value) {
- return reinterpret_cast<uintptr_t>(value);
- }
-
- static uint32_t Hash(uintptr_t key) { return static_cast<uint32_t>(key); }
-
- DISALLOW_HEAP_ALLOCATION(no_allocation_)
+ IdentityMap<SerializerReference, base::DefaultAllocationPolicy> map_;
+ std::unordered_map<void*, SerializerReference> backing_store_map_;
int attached_reference_index_;
- DISALLOW_COPY_AND_ASSIGN(SerializerReferenceMap);
};
} // namespace internal
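
After this rewrite a SerializerReference is a single 32-bit word: a 2-bit type tag in the low bits (TypeBits) and a 30-bit payload above it (ValueBits), replacing the old two-word space/chunk encoding. A simplified, self-contained sketch of that packing using plain shifts and masks rather than V8's base::BitField:

#include <cassert>
#include <cstdint>

enum SpecialValueType : uint32_t {
  kBackReference = 0,
  kAttachedReference = 1,
  kOffHeapBackingStore = 2,
  kBuiltinReference = 3,
};

// Low 2 bits: type tag. Upper 30 bits: payload (an index of some kind).
constexpr uint32_t Encode(SpecialValueType type, uint32_t value) {
  return static_cast<uint32_t>(type) | (value << 2);
}
constexpr SpecialValueType DecodeType(uint32_t bit_field) {
  return static_cast<SpecialValueType>(bit_field & 0x3u);
}
constexpr uint32_t DecodeValue(uint32_t bit_field) { return bit_field >> 2; }

int main() {
  const uint32_t ref = Encode(kBackReference, 42);
  assert(DecodeType(ref) == kBackReference);
  assert(DecodeValue(ref) == 42);
  return 0;
}
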
diff --git a/deps/v8/src/snapshot/roots-serializer.cc b/deps/v8/src/snapshot/roots-serializer.cc
index 6a8f2bb05e..7e459ee811 100644
--- a/deps/v8/src/snapshot/roots-serializer.cc
+++ b/deps/v8/src/snapshot/roots-serializer.cc
@@ -17,6 +17,7 @@ RootsSerializer::RootsSerializer(Isolate* isolate,
RootIndex first_root_to_be_serialized)
: Serializer(isolate, flags),
first_root_to_be_serialized_(first_root_to_be_serialized),
+ object_cache_index_map_(isolate->heap()),
can_be_rehashed_(true) {
for (size_t i = 0; i < static_cast<size_t>(first_root_to_be_serialized);
++i) {
@@ -24,7 +25,7 @@ RootsSerializer::RootsSerializer(Isolate* isolate,
}
}
-int RootsSerializer::SerializeInObjectCache(HeapObject heap_object) {
+int RootsSerializer::SerializeInObjectCache(Handle<HeapObject> heap_object) {
int index;
if (!object_cache_index_map_.LookupOrInsert(heap_object, &index)) {
// This object is not part of the object cache yet. Add it to the cache so
diff --git a/deps/v8/src/snapshot/roots-serializer.h b/deps/v8/src/snapshot/roots-serializer.h
index be41d7220f..7a699a7645 100644
--- a/deps/v8/src/snapshot/roots-serializer.h
+++ b/deps/v8/src/snapshot/roots-serializer.h
@@ -26,6 +26,8 @@ class RootsSerializer : public Serializer {
// are already serialized.
RootsSerializer(Isolate* isolate, Snapshot::SerializerFlags flags,
RootIndex first_root_to_be_serialized);
+ RootsSerializer(const RootsSerializer&) = delete;
+ RootsSerializer& operator=(const RootsSerializer&) = delete;
bool can_be_rehashed() const { return can_be_rehashed_; }
bool root_has_been_serialized(RootIndex root_index) const {
@@ -42,7 +44,7 @@ class RootsSerializer : public Serializer {
void CheckRehashability(HeapObject obj);
// Serializes |object| if not previously seen and returns its cache index.
- int SerializeInObjectCache(HeapObject object);
+ int SerializeInObjectCache(Handle<HeapObject> object);
private:
void VisitRootPointers(Root root, const char* description,
@@ -55,8 +57,6 @@ class RootsSerializer : public Serializer {
// Indicates whether we only serialized hash tables that we can rehash.
// TODO(yangguo): generalize rehashing, and remove this flag.
bool can_be_rehashed_;
-
- DISALLOW_COPY_AND_ASSIGN(RootsSerializer);
};
} // namespace internal
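
SerializeInObjectCache now takes a Handle and a heap-backed object_cache_index_map_, but the indexing contract is unchanged: the first time an object is seen it is assigned the next free cache index (and gets serialized), and later lookups return the same index. A simplified sketch of that contract, with std::unordered_map standing in for V8's IdentityMap and an opaque pointer standing in for a handle (illustrative names only):

#include <unordered_map>

using ObjectKey = const void*;

class ObjectCacheIndexMap {
 public:
  // Returns true if the object was already cached; *index is set either way.
  bool LookupOrInsert(ObjectKey obj, int* index) {
    auto it = map_.find(obj);
    if (it != map_.end()) {
      *index = it->second;
      return true;
    }
    *index = next_index_++;  // first encounter: hand out the next index
    map_.emplace(obj, *index);
    return false;
  }

 private:
  std::unordered_map<ObjectKey, int> map_;
  int next_index_ = 0;
};
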
diff --git a/deps/v8/src/snapshot/serializer-allocator.cc b/deps/v8/src/snapshot/serializer-allocator.cc
deleted file mode 100644
index a1bd9f43eb..0000000000
--- a/deps/v8/src/snapshot/serializer-allocator.cc
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/snapshot/serializer-allocator.h"
-
-#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/snapshot/references.h"
-#include "src/snapshot/serializer.h"
-#include "src/snapshot/snapshot-source-sink.h"
-
-namespace v8 {
-namespace internal {
-
-SerializerAllocator::SerializerAllocator(Serializer* serializer)
- : serializer_(serializer) {
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- pending_chunk_[i] = 0;
- }
-}
-
-void SerializerAllocator::UseCustomChunkSize(uint32_t chunk_size) {
- custom_chunk_size_ = chunk_size;
-}
-
-static uint32_t PageSizeOfSpace(SnapshotSpace space) {
- return static_cast<uint32_t>(
- MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
- static_cast<AllocationSpace>(space)));
-}
-
-uint32_t SerializerAllocator::TargetChunkSize(SnapshotSpace space) {
- if (custom_chunk_size_ == 0) return PageSizeOfSpace(space);
- DCHECK_LE(custom_chunk_size_, PageSizeOfSpace(space));
- return custom_chunk_size_;
-}
-
-SerializerReference SerializerAllocator::Allocate(SnapshotSpace space,
- uint32_t size) {
- const int space_number = static_cast<int>(space);
- DCHECK(IsPreAllocatedSpace(space));
- DCHECK(size > 0 && size <= PageSizeOfSpace(space));
-
- // Maps are allocated through AllocateMap.
- DCHECK_NE(SnapshotSpace::kMap, space);
-
- uint32_t old_chunk_size = pending_chunk_[space_number];
- uint32_t new_chunk_size = old_chunk_size + size;
- // Start a new chunk if the new size exceeds the target chunk size.
- // We may exceed the target chunk size if the single object size does.
- if (new_chunk_size > TargetChunkSize(space) && old_chunk_size != 0) {
- serializer_->PutNextChunk(space);
- completed_chunks_[space_number].push_back(pending_chunk_[space_number]);
- pending_chunk_[space_number] = 0;
- new_chunk_size = size;
- }
- uint32_t offset = pending_chunk_[space_number];
- pending_chunk_[space_number] = new_chunk_size;
- return SerializerReference::BackReference(
- space, static_cast<uint32_t>(completed_chunks_[space_number].size()),
- offset);
-}
-
-SerializerReference SerializerAllocator::AllocateMap() {
- // Maps are allocated one-by-one when deserializing.
- return SerializerReference::MapReference(num_maps_++);
-}
-
-SerializerReference SerializerAllocator::AllocateLargeObject(uint32_t size) {
- // Large objects are allocated one-by-one when deserializing. We do not
- // have to keep track of multiple chunks.
- large_objects_total_size_ += size;
- return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
-}
-
-SerializerReference SerializerAllocator::AllocateOffHeapBackingStore() {
- DCHECK_NE(0, seen_backing_stores_index_);
- return SerializerReference::OffHeapBackingStoreReference(
- seen_backing_stores_index_++);
-}
-
-#ifdef DEBUG
-bool SerializerAllocator::BackReferenceIsAlreadyAllocated(
- SerializerReference reference) const {
- DCHECK(reference.is_back_reference());
- SnapshotSpace space = reference.space();
- if (space == SnapshotSpace::kLargeObject) {
- return reference.large_object_index() < seen_large_objects_index_;
- } else if (space == SnapshotSpace::kMap) {
- return reference.map_index() < num_maps_;
- } else if (space == SnapshotSpace::kReadOnlyHeap &&
- serializer_->isolate()->heap()->deserialization_complete()) {
- // If not deserializing the isolate itself, then we create BackReferences
- // for all read-only heap objects without ever allocating.
- return true;
- } else {
- const int space_number = static_cast<int>(space);
- size_t chunk_index = reference.chunk_index();
- if (chunk_index == completed_chunks_[space_number].size()) {
- return reference.chunk_offset() < pending_chunk_[space_number];
- } else {
- return chunk_index < completed_chunks_[space_number].size() &&
- reference.chunk_offset() <
- completed_chunks_[space_number][chunk_index];
- }
- }
-}
-#endif
-
-std::vector<SerializedData::Reservation>
-SerializerAllocator::EncodeReservations() const {
- std::vector<SerializedData::Reservation> out;
-
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- for (size_t j = 0; j < completed_chunks_[i].size(); j++) {
- out.emplace_back(completed_chunks_[i][j]);
- }
-
- if (pending_chunk_[i] > 0 || completed_chunks_[i].size() == 0) {
- out.emplace_back(pending_chunk_[i]);
- }
- out.back().mark_as_last();
- }
-
- STATIC_ASSERT(SnapshotSpace::kMap ==
- SnapshotSpace::kNumberOfPreallocatedSpaces);
- out.emplace_back(num_maps_ * Map::kSize);
- out.back().mark_as_last();
-
- STATIC_ASSERT(static_cast<int>(SnapshotSpace::kLargeObject) ==
- static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces) +
- 1);
- out.emplace_back(large_objects_total_size_);
- out.back().mark_as_last();
-
- return out;
-}
-
-void SerializerAllocator::OutputStatistics() {
- DCHECK(FLAG_serialization_statistics);
-
- PrintF(" Spaces (bytes):\n");
-
- for (int space = 0; space < kNumberOfSpaces; space++) {
- PrintF("%16s",
- BaseSpace::GetSpaceName(static_cast<AllocationSpace>(space)));
- }
- PrintF("\n");
-
- for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
- size_t s = pending_chunk_[space];
- for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
- PrintF("%16zu", s);
- }
-
- STATIC_ASSERT(SnapshotSpace::kMap ==
- SnapshotSpace::kNumberOfPreallocatedSpaces);
- PrintF("%16d", num_maps_ * Map::kSize);
-
- STATIC_ASSERT(static_cast<int>(SnapshotSpace::kLargeObject) ==
- static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces) +
- 1);
- PrintF("%16d\n", large_objects_total_size_);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/snapshot/serializer-allocator.h b/deps/v8/src/snapshot/serializer-allocator.h
deleted file mode 100644
index 51264961cd..0000000000
--- a/deps/v8/src/snapshot/serializer-allocator.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SNAPSHOT_SERIALIZER_ALLOCATOR_H_
-#define V8_SNAPSHOT_SERIALIZER_ALLOCATOR_H_
-
-#include "src/snapshot/references.h"
-#include "src/snapshot/snapshot-data.h"
-
-namespace v8 {
-namespace internal {
-
-class Serializer;
-
-class SerializerAllocator final {
- public:
- explicit SerializerAllocator(Serializer* serializer);
-
- SerializerReference Allocate(SnapshotSpace space, uint32_t size);
- SerializerReference AllocateMap();
- SerializerReference AllocateLargeObject(uint32_t size);
- SerializerReference AllocateOffHeapBackingStore();
-
- void UseCustomChunkSize(uint32_t chunk_size);
-
-#ifdef DEBUG
- bool BackReferenceIsAlreadyAllocated(
- SerializerReference back_reference) const;
-#endif
-
- std::vector<SerializedData::Reservation> EncodeReservations() const;
-
- void OutputStatistics();
-
- private:
- // We try to not exceed this size for every chunk. We will not succeed for
- // larger objects though.
- uint32_t TargetChunkSize(SnapshotSpace space);
-
- static constexpr int kNumberOfPreallocatedSpaces =
- static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
- static constexpr int kNumberOfSpaces =
- static_cast<int>(SnapshotSpace::kNumberOfSpaces);
-
- // Objects from the same space are put into chunks for bulk-allocation
- // when deserializing. We have to make sure that each chunk fits into a
- // page. So we track the chunk size in pending_chunk_ of a space, but
- // when it exceeds a page, we complete the current chunk and start a new one.
- uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
- std::vector<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
-
- // Number of maps that we need to allocate.
- uint32_t num_maps_ = 0;
-
- // We map serialized large objects to indexes for back-referencing.
- uint32_t large_objects_total_size_ = 0;
- uint32_t seen_large_objects_index_ = 0;
-
- // Used to keep track of the off-heap backing stores used by TypedArrays/
- // ArrayBuffers. Note that the index begins at 1 and not 0, because when a
- // TypedArray has an on-heap backing store, the backing_store pointer in the
- // corresponding ArrayBuffer will be null, which makes it indistinguishable
- // from index 0.
- uint32_t seen_backing_stores_index_ = 1;
-
- uint32_t custom_chunk_size_ = 0;
-
- // The current serializer.
- Serializer* const serializer_;
-
- DISALLOW_COPY_AND_ASSIGN(SerializerAllocator);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SNAPSHOT_SERIALIZER_ALLOCATOR_H_
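
With serializer-allocator.{cc,h} deleted, back references are no longer (space, chunk index, chunk offset) triples produced by a chunk allocator; as the serializer.cc hunks further down show, each serialized object simply receives the next back-reference index in serialization order. A minimal sketch of that flat numbering scheme (names are illustrative, not V8's):

#include <cstdint>
#include <vector>

// Objects are counted in the order they are serialized; a kBackref bytecode
// only needs to record that index.
struct BackRefRegistry {
  uint32_t num_back_refs = 0;
  std::vector<const void*> back_refs;  // kept only for debug checks

  uint32_t RegisterSerialized(const void* obj) {
    back_refs.push_back(obj);
    return num_back_refs++;  // index emitted for later back references
  }
};
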
diff --git a/deps/v8/src/snapshot/serializer-deserializer.cc b/deps/v8/src/snapshot/serializer-deserializer.cc
index 4055d4cca3..afa41e7d03 100644
--- a/deps/v8/src/snapshot/serializer-deserializer.cc
+++ b/deps/v8/src/snapshot/serializer-deserializer.cc
@@ -30,35 +30,26 @@ void SerializerDeserializer::Iterate(Isolate* isolate, RootVisitor* visitor) {
}
bool SerializerDeserializer::CanBeDeferred(HeapObject o) {
- // 1. Maps cannot be deferred as objects are expected to have a valid map
- // immediately.
- // 2. Internalized strings cannot be deferred as they might be
+ // Maps cannot be deferred as objects are expected to have a valid map
+ // immediately. Internalized strings cannot be deferred as they might be
// converted to thin strings during post processing, at which point forward
// references to the now-thin string will already have been written.
- // 3. JS objects with embedder fields cannot be deferred because the
- // serialize/deserialize callbacks need the back reference immediately to
- // identify the object.
// TODO(leszeks): Could we defer string serialization if forward references
// were resolved after object post processing?
- return !o.IsMap() && !o.IsInternalizedString() &&
- !(o.IsJSObject() && JSObject::cast(o).GetEmbedderFieldCount() > 0);
+ return !o.IsMap() && !o.IsInternalizedString();
}
-void SerializerDeserializer::RestoreExternalReferenceRedirectors(
- Isolate* isolate, const std::vector<AccessorInfo>& accessor_infos) {
+void SerializerDeserializer::RestoreExternalReferenceRedirector(
+ Isolate* isolate, Handle<AccessorInfo> accessor_info) {
// Restore wiped accessor infos.
- for (AccessorInfo info : accessor_infos) {
- Foreign::cast(info.js_getter())
- .set_foreign_address(isolate, info.redirected_getter());
- }
+ Foreign::cast(accessor_info->js_getter())
+ .set_foreign_address(isolate, accessor_info->redirected_getter());
}
-void SerializerDeserializer::RestoreExternalReferenceRedirectors(
- Isolate* isolate, const std::vector<CallHandlerInfo>& call_handler_infos) {
- for (CallHandlerInfo info : call_handler_infos) {
- Foreign::cast(info.js_callback())
- .set_foreign_address(isolate, info.redirected_callback());
- }
+void SerializerDeserializer::RestoreExternalReferenceRedirector(
+ Isolate* isolate, Handle<CallHandlerInfo> call_handler_info) {
+ Foreign::cast(call_handler_info->js_callback())
+ .set_foreign_address(isolate, call_handler_info->redirected_callback());
}
} // namespace internal
diff --git a/deps/v8/src/snapshot/serializer-deserializer.h b/deps/v8/src/snapshot/serializer-deserializer.h
index c6c381192e..0e156f75a0 100644
--- a/deps/v8/src/snapshot/serializer-deserializer.h
+++ b/deps/v8/src/snapshot/serializer-deserializer.h
@@ -23,58 +23,20 @@ class SerializerDeserializer : public RootVisitor {
static void Iterate(Isolate* isolate, RootVisitor* visitor);
protected:
- class HotObjectsList {
- public:
- HotObjectsList() = default;
-
- void Add(HeapObject object) {
- DCHECK(!AllowGarbageCollection::IsAllowed());
- circular_queue_[index_] = object;
- index_ = (index_ + 1) & kSizeMask;
- }
-
- HeapObject Get(int index) {
- DCHECK(!AllowGarbageCollection::IsAllowed());
- DCHECK(!circular_queue_[index].is_null());
- return circular_queue_[index];
- }
-
- static const int kNotFound = -1;
-
- int Find(HeapObject object) {
- DCHECK(!AllowGarbageCollection::IsAllowed());
- for (int i = 0; i < kSize; i++) {
- if (circular_queue_[i] == object) return i;
- }
- return kNotFound;
- }
-
- static const int kSize = 8;
-
- private:
- STATIC_ASSERT(base::bits::IsPowerOfTwo(kSize));
- static const int kSizeMask = kSize - 1;
- HeapObject circular_queue_[kSize];
- int index_ = 0;
-
- DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
- };
-
static bool CanBeDeferred(HeapObject o);
- void RestoreExternalReferenceRedirectors(
- Isolate* isolate, const std::vector<AccessorInfo>& accessor_infos);
- void RestoreExternalReferenceRedirectors(
- Isolate* isolate, const std::vector<CallHandlerInfo>& call_handler_infos);
-
- static const int kNumberOfSpaces =
- static_cast<int>(SnapshotSpace::kNumberOfSpaces);
+ void RestoreExternalReferenceRedirector(Isolate* isolate,
+ Handle<AccessorInfo> accessor_info);
+ void RestoreExternalReferenceRedirector(
+ Isolate* isolate, Handle<CallHandlerInfo> call_handler_info);
// clang-format off
#define UNUSED_SERIALIZER_BYTE_CODES(V) \
- V(0x05) V(0x06) V(0x07) V(0x0d) V(0x0e) V(0x0f) \
- /* Free range 0x2a..0x2f */ \
- V(0x2a) V(0x2b) V(0x2c) V(0x2d) V(0x2e) V(0x2f) \
+ /* Free range 0x1c..0x1f */ \
+ V(0x1c) V(0x1d) V(0x1e) V(0x1f) \
+ /* Free range 0x20..0x2f */ \
+ V(0x20) V(0x21) V(0x22) V(0x23) V(0x24) V(0x25) V(0x26) V(0x27) \
+ V(0x28) V(0x29) V(0x2a) V(0x2b) V(0x2c) V(0x2d) V(0x2e) V(0x2f) \
/* Free range 0x30..0x3f */ \
V(0x30) V(0x31) V(0x32) V(0x33) V(0x34) V(0x35) V(0x36) V(0x37) \
V(0x38) V(0x39) V(0x3a) V(0x3b) V(0x3c) V(0x3d) V(0x3e) V(0x3f) \
@@ -103,7 +65,7 @@ class SerializerDeserializer : public RootVisitor {
// The static assert below will trigger when the number of preallocated spaces
// changed. If that happens, update the kNewObject and kBackref bytecode
// ranges in the comments below.
- STATIC_ASSERT(5 == kNumberOfSpaces);
+ STATIC_ASSERT(4 == kNumberOfSnapshotSpaces);
// First 32 root array items.
static const int kRootArrayConstantsCount = 0x20;
@@ -115,27 +77,20 @@ class SerializerDeserializer : public RootVisitor {
// 8 hot (recently seen or back-referenced) objects with optional skip.
static const int kHotObjectCount = 8;
- STATIC_ASSERT(kHotObjectCount == HotObjectsList::kSize);
-
- // 3 alignment prefixes
- static const int kAlignmentPrefixCount = 3;
enum Bytecode : byte {
//
- // ---------- byte code range 0x00..0x0f ----------
+ // ---------- byte code range 0x00..0x1b ----------
//
- // 0x00..0x04 Allocate new object, in specified space.
+ // 0x00..0x03 Allocate new object, in specified space.
kNewObject = 0x00,
- // 0x08..0x0c Reference to previous object from specified space.
- kBackref = 0x08,
-
- //
- // ---------- byte code range 0x10..0x27 ----------
- //
-
+ // Reference to previously allocated object.
+ kBackref = 0x04,
+ // Reference to an object in the read only heap.
+ kReadOnlyHeapRef,
// Object in the startup object cache.
- kStartupObjectCache = 0x10,
+ kStartupObjectCache,
// Root array item.
kRootArray,
// Object provided in the attached list.
@@ -144,16 +99,12 @@ class SerializerDeserializer : public RootVisitor {
kReadOnlyObjectCache,
// Do nothing, used for padding.
kNop,
- // Move to next reserved chunk.
- kNextChunk,
- // 3 alignment prefixes 0x16..0x18
- kAlignmentPrefix = 0x16,
// A tag emitted at strategic points in the snapshot to delineate sections.
// If the deserializer does not find these at the expected moments then it
// is an indication that the snapshot and the VM do not fit together.
// Examine the build process for architecture, version or configuration
// mismatches.
- kSynchronize = 0x19,
+ kSynchronize,
// Repeats of variable length.
kVariableRepeat,
// Used for embedder-allocated backing stores for TypedArrays.
@@ -161,7 +112,6 @@ class SerializerDeserializer : public RootVisitor {
// Used for embedder-provided serialization data for embedder fields.
kEmbedderFieldsData,
// Raw data of variable length.
- kVariableRawCode,
kVariableRawData,
// Used to encode external references provided through the API.
kApiReference,
@@ -193,6 +143,9 @@ class SerializerDeserializer : public RootVisitor {
// register as the pending field. We could either hack around this, or
// simply introduce this new bytecode.
kNewMetaMap,
+ // Special construction bytecode for Code object bodies, which have a more
+ // complex deserialization ordering and RelocInfo processing.
+ kCodeBody,
//
// ---------- byte code range 0x40..0x7f ----------
@@ -248,15 +201,14 @@ class SerializerDeserializer : public RootVisitor {
template <Bytecode bytecode>
using SpaceEncoder =
- BytecodeValueEncoder<bytecode, 0, kNumberOfSpaces - 1, SnapshotSpace>;
+ BytecodeValueEncoder<bytecode, 0, kNumberOfSnapshotSpaces - 1,
+ SnapshotSpace>;
using NewObject = SpaceEncoder<kNewObject>;
- using BackRef = SpaceEncoder<kBackref>;
//
// Some other constants.
//
- static const SnapshotSpace kAnyOldSpace = SnapshotSpace::kNumberOfSpaces;
// Sentinel after a new object to indicate that double alignment is needed.
static const int kDoubleAlignmentSentinel = 0;
@@ -303,8 +255,9 @@ class SerializerDeserializer : public RootVisitor {
RootIndex>;
using HotObject = BytecodeValueEncoder<kHotObject, 0, kHotObjectCount - 1>;
- // ---------- member variable ----------
- HotObjectsList hot_objects_;
+ // This backing store reference value represents nullptr values during
+ // serialization/deserialization.
+ static const uint32_t kNullRefSentinel = 0;
};
} // namespace internal
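
The reshuffled bytecode table keeps the pattern that a small operand can be folded into the opcode byte itself: with four snapshot spaces, kNewObject reserves the range 0x00..0x03 and SpaceEncoder adds or subtracts the space value. A simplified standalone sketch of that encode/decode round trip, using plain functions instead of V8's BytecodeValueEncoder template:

#include <cassert>
#include <cstdint>

enum Bytecode : uint8_t { kNewObject = 0x00, kBackref = 0x04 };
enum class SnapshotSpace : uint8_t { kReadOnlyHeap, kOld, kCode, kMap };

// kNewObject occupies bytes 0x00..0x03, one per snapshot space.
constexpr uint8_t EncodeNewObject(SnapshotSpace space) {
  return kNewObject + static_cast<uint8_t>(space);
}
constexpr SnapshotSpace DecodeNewObject(uint8_t byte) {
  return static_cast<SnapshotSpace>(byte - kNewObject);
}

int main() {
  const uint8_t b = EncodeNewObject(SnapshotSpace::kCode);
  assert(b == 0x02);
  assert(DecodeNewObject(b) == SnapshotSpace::kCode);
  return 0;
}
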
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 4a18383e45..a0088315d3 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -14,21 +14,32 @@
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/map.h"
+#include "src/objects/objects-body-descriptors-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi.h"
+#include "src/snapshot/serializer-deserializer.h"
namespace v8 {
namespace internal {
Serializer::Serializer(Isolate* isolate, Snapshot::SerializerFlags flags)
: isolate_(isolate),
+ hot_objects_(isolate->heap()),
+ reference_map_(isolate),
external_reference_encoder_(isolate),
root_index_map_(isolate),
- flags_(flags),
- allocator_(this) {
+ deferred_objects_(isolate->heap()),
+ forward_refs_per_pending_object_(isolate->heap()),
+ flags_(flags)
+#ifdef DEBUG
+ ,
+ back_refs_(isolate->heap()),
+ stack_(isolate->heap())
+#endif
+{
#ifdef OBJECT_PRINT
if (FLAG_serialization_statistics) {
- for (int space = 0; space < kNumberOfSpaces; ++space) {
+ for (int space = 0; space < kNumberOfSnapshotSpaces; ++space) {
// Value-initialized to 0.
instance_type_count_[space] = std::make_unique<int[]>(kInstanceTypes);
instance_type_size_[space] = std::make_unique<size_t[]>(kInstanceTypes);
@@ -37,25 +48,47 @@ Serializer::Serializer(Isolate* isolate, Snapshot::SerializerFlags flags)
#endif // OBJECT_PRINT
}
-#ifdef OBJECT_PRINT
-void Serializer::CountInstanceType(Map map, int size, SnapshotSpace space) {
+void Serializer::CountAllocation(Map map, int size, SnapshotSpace space) {
+ DCHECK(FLAG_serialization_statistics);
+
const int space_number = static_cast<int>(space);
+ allocation_size_[space_number] += size;
+#ifdef OBJECT_PRINT
int instance_type = map.instance_type();
instance_type_count_[space_number][instance_type]++;
instance_type_size_[space_number][instance_type] += size;
-}
#endif // OBJECT_PRINT
+}
+
+int Serializer::TotalAllocationSize() const {
+ int sum = 0;
+ for (int space = 0; space < kNumberOfSnapshotSpaces; space++) {
+ sum += allocation_size_[space];
+ }
+ return sum;
+}
void Serializer::OutputStatistics(const char* name) {
if (!FLAG_serialization_statistics) return;
PrintF("%s:\n", name);
- allocator()->OutputStatistics();
+
+ PrintF(" Spaces (bytes):\n");
+
+ for (int space = 0; space < kNumberOfSnapshotSpaces; space++) {
+ PrintF("%16s",
+ BaseSpace::GetSpaceName(static_cast<AllocationSpace>(space)));
+ }
+ PrintF("\n");
+
+ for (int space = 0; space < kNumberOfSnapshotSpaces; space++) {
+ PrintF("%16zu", allocation_size_[space]);
+ }
#ifdef OBJECT_PRINT
PrintF(" Instance types (count and bytes):\n");
#define PRINT_INSTANCE_TYPE(Name) \
- for (int space = 0; space < kNumberOfSpaces; ++space) { \
+ for (int space = 0; space < kNumberOfSnapshotSpaces; ++space) { \
if (instance_type_count_[space][Name]) { \
PrintF("%10d %10zu %-10s %s\n", instance_type_count_[space][Name], \
instance_type_size_[space][Name], \
@@ -74,15 +107,24 @@ void Serializer::SerializeDeferredObjects() {
if (FLAG_trace_serializer) {
PrintF("Serializing deferred objects\n");
}
- while (!deferred_objects_.empty()) {
- HeapObject obj = deferred_objects_.back();
- deferred_objects_.pop_back();
+ WHILE_WITH_HANDLE_SCOPE(isolate(), !deferred_objects_.empty(), {
+ Handle<HeapObject> obj = handle(deferred_objects_.Pop(), isolate());
+
ObjectSerializer obj_serializer(this, obj, &sink_);
obj_serializer.SerializeDeferred();
- }
+ });
sink_.Put(kSynchronize, "Finished with deferred objects");
}
+void Serializer::SerializeObject(Handle<HeapObject> obj) {
+ // ThinStrings are just an indirection to an internalized string, so elide the
+ // indirection and serialize the actual string directly.
+ if (obj->IsThinString(isolate())) {
+ obj = handle(ThinString::cast(*obj).actual(isolate()), isolate());
+ }
+ SerializeObjectImpl(obj);
+}
+
bool Serializer::MustBeDeferred(HeapObject object) { return false; }
void Serializer::VisitRootPointers(Root root, const char* description,
@@ -97,7 +139,7 @@ void Serializer::SerializeRootObject(FullObjectSlot slot) {
if (o.IsSmi()) {
PutSmiRoot(slot);
} else {
- SerializeObject(HeapObject::cast(o));
+ SerializeObject(Handle<HeapObject>(slot.location()));
}
}
@@ -106,88 +148,87 @@ void Serializer::PrintStack() { PrintStack(std::cout); }
void Serializer::PrintStack(std::ostream& out) {
for (const auto o : stack_) {
- o.Print(out);
+ o->Print(out);
out << "\n";
}
}
#endif // DEBUG
-bool Serializer::SerializeRoot(HeapObject obj) {
+bool Serializer::SerializeRoot(Handle<HeapObject> obj) {
RootIndex root_index;
// Derived serializers are responsible for determining if the root has
// actually been serialized before calling this.
- if (root_index_map()->Lookup(obj, &root_index)) {
- PutRoot(root_index, obj);
+ if (root_index_map()->Lookup(*obj, &root_index)) {
+ PutRoot(root_index);
return true;
}
return false;
}
-bool Serializer::SerializeHotObject(HeapObject obj) {
+bool Serializer::SerializeHotObject(Handle<HeapObject> obj) {
// Encode a reference to a hot object by its index in the working set.
- int index = hot_objects_.Find(obj);
+ int index = hot_objects_.Find(*obj);
if (index == HotObjectsList::kNotFound) return false;
DCHECK(index >= 0 && index < kHotObjectCount);
if (FLAG_trace_serializer) {
PrintF(" Encoding hot object %d:", index);
- obj.ShortPrint();
+ obj->ShortPrint();
PrintF("\n");
}
sink_.Put(HotObject::Encode(index), "HotObject");
return true;
}
-bool Serializer::SerializeBackReference(HeapObject obj) {
- SerializerReference reference =
- reference_map_.LookupReference(reinterpret_cast<void*>(obj.ptr()));
- if (!reference.is_valid()) return false;
+bool Serializer::SerializeBackReference(Handle<HeapObject> obj) {
+ const SerializerReference* reference = reference_map_.LookupReference(obj);
+ if (reference == nullptr) return false;
// Encode the location of an already deserialized object in order to write
// its location into a later object. We can encode the location as an
// offset from the start of the deserialized objects or as an offset
// backwards from the current allocation pointer.
- if (reference.is_attached_reference()) {
+ if (reference->is_attached_reference()) {
if (FLAG_trace_serializer) {
PrintF(" Encoding attached reference %d\n",
- reference.attached_reference_index());
+ reference->attached_reference_index());
}
- PutAttachedReference(reference);
+ PutAttachedReference(*reference);
} else {
- DCHECK(reference.is_back_reference());
+ DCHECK(reference->is_back_reference());
if (FLAG_trace_serializer) {
PrintF(" Encoding back reference to: ");
- obj.ShortPrint();
+ obj->ShortPrint();
PrintF("\n");
}
- PutAlignmentPrefix(obj);
- SnapshotSpace space = reference.space();
- sink_.Put(BackRef::Encode(space), "BackRef");
- PutBackReference(obj, reference);
+ sink_.Put(kBackref, "Backref");
+ PutBackReference(obj, *reference);
}
return true;
}
-bool Serializer::SerializePendingObject(HeapObject obj) {
- PendingObjectReference pending_obj =
- forward_refs_per_pending_object_.find(obj);
- if (pending_obj == forward_refs_per_pending_object_.end()) {
+bool Serializer::SerializePendingObject(Handle<HeapObject> obj) {
+ PendingObjectReferences* refs_to_object =
+ forward_refs_per_pending_object_.Find(obj);
+ if (refs_to_object == nullptr) {
return false;
}
- PutPendingForwardReferenceTo(pending_obj);
+ PutPendingForwardReference(*refs_to_object);
return true;
}
-bool Serializer::ObjectIsBytecodeHandler(HeapObject obj) const {
- if (!obj.IsCode()) return false;
- return (Code::cast(obj).kind() == CodeKind::BYTECODE_HANDLER);
+bool Serializer::ObjectIsBytecodeHandler(Handle<HeapObject> obj) const {
+ if (!obj->IsCode()) return false;
+ return (Code::cast(*obj).kind() == CodeKind::BYTECODE_HANDLER);
}
-void Serializer::PutRoot(RootIndex root, HeapObject object) {
+void Serializer::PutRoot(RootIndex root) {
int root_index = static_cast<int>(root);
+ Handle<HeapObject> object =
+ Handle<HeapObject>::cast(isolate()->root_handle(root));
if (FLAG_trace_serializer) {
PrintF(" Encoding root %d:", root_index);
- object.ShortPrint();
+ object->ShortPrint();
PrintF("\n");
}
@@ -198,12 +239,12 @@ void Serializer::PutRoot(RootIndex root, HeapObject object) {
// TODO(ulan): Check that it works with young large objects.
if (root_index < kRootArrayConstantsCount &&
- !Heap::InYoungGeneration(object)) {
+ !Heap::InYoungGeneration(*object)) {
sink_.Put(RootArrayConstant::Encode(root), "RootConstant");
} else {
sink_.Put(kRootArray, "RootSerialization");
sink_.PutInt(root_index, "root_index");
- hot_objects_.Add(object);
+ hot_objects_.Add(*object);
}
}
@@ -222,25 +263,11 @@ void Serializer::PutSmiRoot(FullObjectSlot slot) {
sink_.PutRaw(raw_value_as_bytes, bytes_to_output, "Bytes");
}
-void Serializer::PutBackReference(HeapObject object,
+void Serializer::PutBackReference(Handle<HeapObject> object,
SerializerReference reference) {
- DCHECK(allocator()->BackReferenceIsAlreadyAllocated(reference));
- switch (reference.space()) {
- case SnapshotSpace::kMap:
- sink_.PutInt(reference.map_index(), "BackRefMapIndex");
- break;
-
- case SnapshotSpace::kLargeObject:
- sink_.PutInt(reference.large_object_index(), "BackRefLargeObjectIndex");
- break;
-
- default:
- sink_.PutInt(reference.chunk_index(), "BackRefChunkIndex");
- sink_.PutInt(reference.chunk_offset(), "BackRefChunkOffset");
- break;
- }
-
- hot_objects_.Add(object);
+ DCHECK_EQ(*object, *back_refs_[reference.back_ref_index()]);
+ sink_.PutInt(reference.back_ref_index(), "BackRefIndex");
+ hot_objects_.Add(*object);
}
void Serializer::PutAttachedReference(SerializerReference reference) {
@@ -249,22 +276,6 @@ void Serializer::PutAttachedReference(SerializerReference reference) {
sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
}
-int Serializer::PutAlignmentPrefix(HeapObject object) {
- AllocationAlignment alignment = HeapObject::RequiredAlignment(object.map());
- if (alignment != kWordAligned) {
- DCHECK(1 <= alignment && alignment <= 3);
- byte prefix = (kAlignmentPrefix - 1) + alignment;
- sink_.Put(prefix, "Alignment");
- return Heap::GetMaximumFillToAlign(alignment);
- }
- return 0;
-}
-
-void Serializer::PutNextChunk(SnapshotSpace space) {
- sink_.Put(kNextChunk, "NextChunk");
- sink_.Put(static_cast<byte>(space), "NextChunkSpace");
-}
-
void Serializer::PutRepeat(int repeat_count) {
if (repeat_count <= kLastEncodableFixedRepeatCount) {
sink_.Put(FixedRepeatWithCount::Encode(repeat_count), "FixedRepeat");
@@ -274,13 +285,19 @@ void Serializer::PutRepeat(int repeat_count) {
}
}
-void Serializer::PutPendingForwardReferenceTo(
- PendingObjectReference reference) {
+void Serializer::PutPendingForwardReference(PendingObjectReferences& refs) {
sink_.Put(kRegisterPendingForwardRef, "RegisterPendingForwardRef");
unresolved_forward_refs_++;
// Register the current slot with the pending object.
int forward_ref_id = next_forward_ref_id_++;
- reference->second.push_back(forward_ref_id);
+ if (refs == nullptr) {
+ // The IdentityMap holding the pending object reference vectors does not
+ // support non-trivial types; in particular it doesn't support destructors
+ // on values. So, we manually allocate a vector with new, and delete it when
+ // resolving the pending object.
+ refs = new std::vector<int>();
+ }
+ refs->push_back(forward_ref_id);
}
void Serializer::ResolvePendingForwardReference(int forward_reference_id) {
@@ -295,27 +312,34 @@ void Serializer::ResolvePendingForwardReference(int forward_reference_id) {
}
}
-Serializer::PendingObjectReference Serializer::RegisterObjectIsPending(
- HeapObject obj) {
+void Serializer::RegisterObjectIsPending(Handle<HeapObject> obj) {
+ if (*obj == ReadOnlyRoots(isolate()).not_mapped_symbol()) return;
+
// Add the given object to the pending objects -> forward refs map.
- auto forward_refs_entry_insertion =
- forward_refs_per_pending_object_.emplace(obj, std::vector<int>());
+ auto find_result = forward_refs_per_pending_object_.FindOrInsert(obj);
+ USE(find_result);
// If the above emplace didn't actually add the object, then the object must
// already have been registered pending by deferring. It might not be in the
// deferred objects queue though, since it may be the very object we just
// popped off that queue, so just check that it can be deferred.
- DCHECK_IMPLIES(!forward_refs_entry_insertion.second, CanBeDeferred(obj));
-
- // return the iterator into the map as the reference.
- return forward_refs_entry_insertion.first;
+ DCHECK_IMPLIES(find_result.already_exists, *find_result.entry != nullptr);
+ DCHECK_IMPLIES(find_result.already_exists, CanBeDeferred(*obj));
}
-void Serializer::ResolvePendingObject(Serializer::PendingObjectReference ref) {
- for (int index : ref->second) {
- ResolvePendingForwardReference(index);
+void Serializer::ResolvePendingObject(Handle<HeapObject> obj) {
+ if (*obj == ReadOnlyRoots(isolate()).not_mapped_symbol()) return;
+
+ std::vector<int>* refs;
+ CHECK(forward_refs_per_pending_object_.Delete(obj, &refs));
+ if (refs) {
+ for (int index : *refs) {
+ ResolvePendingForwardReference(index);
+ }
+ // See PutPendingForwardReference -- we have to manually manage the memory
+ // of non-trivial IdentityMap values.
+ delete refs;
}
- forward_refs_per_pending_object_.erase(ref);
}
void Serializer::Pad(int padding_offset) {
@@ -351,19 +375,17 @@ void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
int size, Map map) {
if (serializer_->code_address_map_) {
const char* code_name =
- serializer_->code_address_map_->Lookup(object_.address());
+ serializer_->code_address_map_->Lookup(object_->address());
LOG(serializer_->isolate_,
- CodeNameEvent(object_.address(), sink_->Position(), code_name));
+ CodeNameEvent(object_->address(), sink_->Position(), code_name));
}
- SerializerReference back_reference;
- if (map == object_) {
- DCHECK_EQ(object_, ReadOnlyRoots(serializer_->isolate()).meta_map());
+ if (map == *object_) {
+ DCHECK_EQ(*object_, ReadOnlyRoots(isolate()).meta_map());
DCHECK_EQ(space, SnapshotSpace::kReadOnlyHeap);
sink_->Put(kNewMetaMap, "NewMetaMap");
DCHECK_EQ(size, Map::kSize);
- back_reference = serializer_->allocator()->Allocate(space, size);
} else {
sink_->Put(NewObject::Encode(space), "NewObject");
@@ -371,133 +393,157 @@ void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
// Until the space for the object is allocated, it is considered "pending".
- auto pending_object_ref = serializer_->RegisterObjectIsPending(object_);
+ serializer_->RegisterObjectIsPending(object_);
// Serialize map (first word of the object) before anything else, so that
// the deserializer can access it when allocating. Make sure that the map
// isn't a pending object.
- DCHECK_EQ(serializer_->forward_refs_per_pending_object_.count(map), 0);
+ DCHECK_NULL(serializer_->forward_refs_per_pending_object_.Find(map));
DCHECK(map.IsMap());
- serializer_->SerializeObject(map);
+ serializer_->SerializeObject(handle(map, isolate()));
// Make sure the map serialization didn't accidentally recursively serialize
// this object.
- DCHECK(!serializer_->reference_map()
- ->LookupReference(reinterpret_cast<void*>(object_.ptr()))
- .is_valid());
-
- // Allocate the object after the map is serialized.
- if (space == SnapshotSpace::kLargeObject) {
- CHECK(!object_.IsCode());
- back_reference = serializer_->allocator()->AllocateLargeObject(size);
- } else if (space == SnapshotSpace::kMap) {
- back_reference = serializer_->allocator()->AllocateMap();
- DCHECK_EQ(Map::kSize, size);
- } else {
- int fill = serializer_->PutAlignmentPrefix(object_);
- back_reference = serializer_->allocator()->Allocate(space, size + fill);
- }
+ DCHECK_IMPLIES(
+ *object_ != ReadOnlyRoots(isolate()).not_mapped_symbol(),
+ serializer_->reference_map()->LookupReference(object_) == nullptr);
// Now that the object is allocated, we can resolve pending references to
// it.
- serializer_->ResolvePendingObject(pending_object_ref);
+ serializer_->ResolvePendingObject(object_);
}
-#ifdef OBJECT_PRINT
if (FLAG_serialization_statistics) {
- serializer_->CountInstanceType(map, size, space);
+ serializer_->CountAllocation(object_->map(), size, space);
}
-#endif // OBJECT_PRINT
- // Mark this object as already serialized.
- serializer_->reference_map()->Add(reinterpret_cast<void*>(object_.ptr()),
- back_reference);
+ // Mark this object as already serialized, and add it to the reference map so
+ // that future objects can access it by back reference.
+ serializer_->num_back_refs_++;
+#ifdef DEBUG
+ serializer_->back_refs_.Push(*object_);
+ DCHECK_EQ(serializer_->back_refs_.size(), serializer_->num_back_refs_);
+#endif
+ if (*object_ != ReadOnlyRoots(isolate()).not_mapped_symbol()) {
+ // Only add the object to the map if it's not not_mapped_symbol, else
+ // the reference IdentityMap has issues. We don't expect to have back
+ // references to the not_mapped_symbol anyway, so it's fine.
+ SerializerReference back_reference =
+ SerializerReference::BackReference(serializer_->num_back_refs_ - 1);
+ serializer_->reference_map()->Add(*object_, back_reference);
+ DCHECK_EQ(*object_,
+ *serializer_->back_refs_[back_reference.back_ref_index()]);
+ DCHECK_EQ(back_reference.back_ref_index(), serializer_->reference_map()
+ ->LookupReference(object_)
+ ->back_ref_index());
+ }
}
uint32_t Serializer::ObjectSerializer::SerializeBackingStore(
void* backing_store, int32_t byte_length) {
- SerializerReference reference =
- serializer_->reference_map()->LookupReference(backing_store);
+ const SerializerReference* reference_ptr =
+ serializer_->reference_map()->LookupBackingStore(backing_store);
// Serialize the off-heap backing store.
- if (!reference.is_valid()) {
+ if (!reference_ptr) {
sink_->Put(kOffHeapBackingStore, "Off-heap backing store");
sink_->PutInt(byte_length, "length");
sink_->PutRaw(static_cast<byte*>(backing_store), byte_length,
"BackingStore");
- reference = serializer_->allocator()->AllocateOffHeapBackingStore();
+ DCHECK_NE(0, serializer_->seen_backing_stores_index_);
+ SerializerReference reference =
+ SerializerReference::OffHeapBackingStoreReference(
+ serializer_->seen_backing_stores_index_++);
// Mark this backing store as already serialized.
- serializer_->reference_map()->Add(backing_store, reference);
+ serializer_->reference_map()->AddBackingStore(backing_store, reference);
+ return reference.off_heap_backing_store_index();
+ } else {
+ return reference_ptr->off_heap_backing_store_index();
}
-
- return reference.off_heap_backing_store_index();
}
void Serializer::ObjectSerializer::SerializeJSTypedArray() {
- JSTypedArray typed_array = JSTypedArray::cast(object_);
- if (typed_array.is_on_heap()) {
- typed_array.RemoveExternalPointerCompensationForSerialization(
- serializer_->isolate());
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object_);
+ if (typed_array->is_on_heap()) {
+ typed_array->RemoveExternalPointerCompensationForSerialization(isolate());
} else {
- if (!typed_array.WasDetached()) {
+ if (!typed_array->WasDetached()) {
// Explicitly serialize the backing store now.
- JSArrayBuffer buffer = JSArrayBuffer::cast(typed_array.buffer());
+ JSArrayBuffer buffer = JSArrayBuffer::cast(typed_array->buffer());
// We cannot store byte_length larger than int32 range in the snapshot.
CHECK_LE(buffer.byte_length(), std::numeric_limits<int32_t>::max());
int32_t byte_length = static_cast<int32_t>(buffer.byte_length());
- size_t byte_offset = typed_array.byte_offset();
+ size_t byte_offset = typed_array->byte_offset();
// We need to calculate the backing store from the data pointer
// because the ArrayBuffer may already have been serialized.
void* backing_store = reinterpret_cast<void*>(
- reinterpret_cast<Address>(typed_array.DataPtr()) - byte_offset);
+ reinterpret_cast<Address>(typed_array->DataPtr()) - byte_offset);
uint32_t ref = SerializeBackingStore(backing_store, byte_length);
- typed_array.SetExternalBackingStoreRefForSerialization(ref);
+ typed_array->SetExternalBackingStoreRefForSerialization(ref);
} else {
- typed_array.SetExternalBackingStoreRefForSerialization(0);
+ typed_array->SetExternalBackingStoreRefForSerialization(0);
}
}
SerializeObject();
}
void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
- JSArrayBuffer buffer = JSArrayBuffer::cast(object_);
- void* backing_store = buffer.backing_store();
+ Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(object_);
+ void* backing_store = buffer->backing_store();
// We cannot store byte_length larger than int32 range in the snapshot.
- CHECK_LE(buffer.byte_length(), std::numeric_limits<int32_t>::max());
- int32_t byte_length = static_cast<int32_t>(buffer.byte_length());
- ArrayBufferExtension* extension = buffer.extension();
+ CHECK_LE(buffer->byte_length(), std::numeric_limits<int32_t>::max());
+ int32_t byte_length = static_cast<int32_t>(buffer->byte_length());
+ ArrayBufferExtension* extension = buffer->extension();
// The embedder-allocated backing store only exists for the off-heap case.
+#ifdef V8_HEAP_SANDBOX
+ uint32_t external_pointer_entry =
+ buffer->GetBackingStoreRefForDeserialization();
+#endif
if (backing_store != nullptr) {
uint32_t ref = SerializeBackingStore(backing_store, byte_length);
- buffer.SetBackingStoreRefForSerialization(ref);
+ buffer->SetBackingStoreRefForSerialization(ref);
// Ensure deterministic output by setting extension to null during
// serialization.
- buffer.set_extension(nullptr);
+ buffer->set_extension(nullptr);
+ } else {
+ buffer->SetBackingStoreRefForSerialization(kNullRefSentinel);
}
SerializeObject();
- buffer.set_backing_store(serializer_->isolate(), backing_store);
- buffer.set_extension(extension);
+#ifdef V8_HEAP_SANDBOX
+ buffer->SetBackingStoreRefForSerialization(external_pointer_entry);
+#else
+ buffer->set_backing_store(isolate(), backing_store);
+#endif
+ buffer->set_extension(extension);
}
void Serializer::ObjectSerializer::SerializeExternalString() {
// For external strings with known resources, we replace the resource field
// with the encoded external reference, which we restore upon deserialize.
// For the rest we serialize them to look like ordinary sequential strings.
- ExternalString string = ExternalString::cast(object_);
- Address resource = string.resource_as_address();
+ Handle<ExternalString> string = Handle<ExternalString>::cast(object_);
+ Address resource = string->resource_as_address();
ExternalReferenceEncoder::Value reference;
if (serializer_->external_reference_encoder_.TryEncode(resource).To(
&reference)) {
DCHECK(reference.is_from_api());
- string.set_uint32_as_resource(serializer_->isolate(), reference.index());
+#ifdef V8_HEAP_SANDBOX
+ uint32_t external_pointer_entry =
+ string->GetResourceRefForDeserialization();
+#endif
+ string->SetResourceRefForSerialization(reference.index());
SerializeObject();
- string.set_address_as_resource(serializer_->isolate(), resource);
+#ifdef V8_HEAP_SANDBOX
+ string->SetResourceRefForSerialization(external_pointer_entry);
+#else
+ string->set_address_as_resource(isolate(), resource);
+#endif
} else {
SerializeExternalStringAsSequentialString();
}
@@ -506,46 +552,45 @@ void Serializer::ObjectSerializer::SerializeExternalString() {
void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
// Instead of serializing this as an external string, we serialize
// an imaginary sequential string with the same content.
- ReadOnlyRoots roots(serializer_->isolate());
- DCHECK(object_.IsExternalString());
- ExternalString string = ExternalString::cast(object_);
- int length = string.length();
+ ReadOnlyRoots roots(isolate());
+ DCHECK(object_->IsExternalString());
+ Handle<ExternalString> string = Handle<ExternalString>::cast(object_);
+ int length = string->length();
Map map;
int content_size;
int allocation_size;
const byte* resource;
// Find the map and size for the imaginary sequential string.
- bool internalized = object_.IsInternalizedString();
- if (object_.IsExternalOneByteString()) {
+ bool internalized = object_->IsInternalizedString();
+ if (object_->IsExternalOneByteString()) {
map = internalized ? roots.one_byte_internalized_string_map()
: roots.one_byte_string_map();
allocation_size = SeqOneByteString::SizeFor(length);
content_size = length * kCharSize;
resource = reinterpret_cast<const byte*>(
- ExternalOneByteString::cast(string).resource()->data());
+ Handle<ExternalOneByteString>::cast(string)->resource()->data());
} else {
map = internalized ? roots.internalized_string_map() : roots.string_map();
allocation_size = SeqTwoByteString::SizeFor(length);
content_size = length * kShortSize;
resource = reinterpret_cast<const byte*>(
- ExternalTwoByteString::cast(string).resource()->data());
+ Handle<ExternalTwoByteString>::cast(string)->resource()->data());
}
- SnapshotSpace space = (allocation_size > kMaxRegularHeapObjectSize)
- ? SnapshotSpace::kLargeObject
- : SnapshotSpace::kOld;
+ SnapshotSpace space = SnapshotSpace::kOld;
SerializePrologue(space, allocation_size, map);
// Output the rest of the imaginary string.
int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
DCHECK(IsAligned(bytes_to_output, kTaggedSize));
+ int slots_to_output = bytes_to_output >> kTaggedSizeLog2;
// Output raw data header. Do not bother with common raw length cases here.
sink_->Put(kVariableRawData, "RawDataForString");
- sink_->PutInt(bytes_to_output, "length");
+ sink_->PutInt(slots_to_output, "length");
// Serialize string header (except for map).
- byte* string_start = reinterpret_cast<byte*>(string.address());
+ byte* string_start = reinterpret_cast<byte*>(string->address());
for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
sink_->Put(string_start[i], "StringHeader");
}
@@ -565,26 +610,27 @@ void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
// TODO(all): replace this with proper iteration of weak slots in serializer.
class UnlinkWeakNextScope {
public:
- explicit UnlinkWeakNextScope(Heap* heap, HeapObject object) {
- if (object.IsAllocationSite() &&
- AllocationSite::cast(object).HasWeakNext()) {
+ explicit UnlinkWeakNextScope(Heap* heap, Handle<HeapObject> object) {
+ if (object->IsAllocationSite() &&
+ Handle<AllocationSite>::cast(object)->HasWeakNext()) {
object_ = object;
- next_ = AllocationSite::cast(object).weak_next();
- AllocationSite::cast(object).set_weak_next(
+ next_ =
+ handle(AllocationSite::cast(*object).weak_next(), heap->isolate());
+ Handle<AllocationSite>::cast(object)->set_weak_next(
ReadOnlyRoots(heap).undefined_value());
}
}
~UnlinkWeakNextScope() {
if (!object_.is_null()) {
- AllocationSite::cast(object_).set_weak_next(next_,
- UPDATE_WEAK_WRITE_BARRIER);
+ Handle<AllocationSite>::cast(object_)->set_weak_next(
+ *next_, UPDATE_WEAK_WRITE_BARRIER);
}
}
private:
- HeapObject object_;
- Object next_;
+ Handle<HeapObject> object_;
+ Handle<Object> next_;
DISALLOW_HEAP_ALLOCATION(no_gc_)
};
@@ -593,103 +639,120 @@ void Serializer::ObjectSerializer::Serialize() {
// Defer objects as "pending" if they cannot be serialized now, or if we
// exceed a certain recursion depth. Some objects cannot be deferred
- if ((recursion.ExceedsMaximum() && CanBeDeferred(object_)) ||
- serializer_->MustBeDeferred(object_)) {
- DCHECK(CanBeDeferred(object_));
+ if ((recursion.ExceedsMaximum() && CanBeDeferred(*object_)) ||
+ serializer_->MustBeDeferred(*object_)) {
+ DCHECK(CanBeDeferred(*object_));
if (FLAG_trace_serializer) {
PrintF(" Deferring heap object: ");
- object_.ShortPrint();
+ object_->ShortPrint();
PrintF("\n");
}
// Deferred objects are considered "pending".
- PendingObjectReference pending_obj =
- serializer_->RegisterObjectIsPending(object_);
- serializer_->PutPendingForwardReferenceTo(pending_obj);
+ serializer_->RegisterObjectIsPending(object_);
+ serializer_->PutPendingForwardReference(
+ *serializer_->forward_refs_per_pending_object_.Find(object_));
serializer_->QueueDeferredObject(object_);
return;
}
if (FLAG_trace_serializer) {
PrintF(" Encoding heap object: ");
- object_.ShortPrint();
+ object_->ShortPrint();
PrintF("\n");
}
- if (object_.IsExternalString()) {
+ if (object_->IsExternalString()) {
SerializeExternalString();
return;
- } else if (!ReadOnlyHeap::Contains(object_)) {
+ } else if (!ReadOnlyHeap::Contains(*object_)) {
// Only clear padding for strings outside the read-only heap. Read-only heap
// should have been cleared elsewhere.
- if (object_.IsSeqOneByteString()) {
+ if (object_->IsSeqOneByteString()) {
// Clear padding bytes at the end. Done here to avoid having to do this
// at allocation sites in generated code.
- SeqOneByteString::cast(object_).clear_padding();
- } else if (object_.IsSeqTwoByteString()) {
- SeqTwoByteString::cast(object_).clear_padding();
+ Handle<SeqOneByteString>::cast(object_)->clear_padding();
+ } else if (object_->IsSeqTwoByteString()) {
+ Handle<SeqTwoByteString>::cast(object_)->clear_padding();
}
}
- if (object_.IsJSTypedArray()) {
+ if (object_->IsJSTypedArray()) {
SerializeJSTypedArray();
return;
- }
- if (object_.IsJSArrayBuffer()) {
+ } else if (object_->IsJSArrayBuffer()) {
SerializeJSArrayBuffer();
return;
}
// We don't expect fillers.
- DCHECK(!object_.IsFreeSpaceOrFiller());
+ DCHECK(!object_->IsFreeSpaceOrFiller());
- if (object_.IsScript()) {
+ if (object_->IsScript()) {
// Clear cached line ends.
- Oddball undefined = ReadOnlyRoots(serializer_->isolate()).undefined_value();
- Script::cast(object_).set_line_ends(undefined);
+ Oddball undefined = ReadOnlyRoots(isolate()).undefined_value();
+ Handle<Script>::cast(object_)->set_line_ends(undefined);
}
SerializeObject();
}
namespace {
-SnapshotSpace GetSnapshotSpace(HeapObject object) {
+SnapshotSpace GetSnapshotSpace(Handle<HeapObject> object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- if (third_party_heap::Heap::InCodeSpace(object.address())) {
+ if (object->IsCode()) {
return SnapshotSpace::kCode;
- } else if (ReadOnlyHeap::Contains(object)) {
+ } else if (ReadOnlyHeap::Contains(*object)) {
return SnapshotSpace::kReadOnlyHeap;
- } else if (object.Size() > kMaxRegularHeapObjectSize) {
- return SnapshotSpace::kLargeObject;
- } else if (object.IsMap()) {
+ } else if (object->IsMap()) {
return SnapshotSpace::kMap;
} else {
- return SnapshotSpace::kOld; // avoid new/young distinction in TPH
+ return SnapshotSpace::kOld;
}
- } else if (ReadOnlyHeap::Contains(object)) {
+ } else if (ReadOnlyHeap::Contains(*object)) {
return SnapshotSpace::kReadOnlyHeap;
} else {
AllocationSpace heap_space =
- MemoryChunk::FromHeapObject(object)->owner_identity();
+ MemoryChunk::FromHeapObject(*object)->owner_identity();
// Large code objects are not supported and cannot be expressed by
// SnapshotSpace.
DCHECK_NE(heap_space, CODE_LO_SPACE);
switch (heap_space) {
+ case OLD_SPACE:
// Young generation objects are tenured, as objects that have survived
// until snapshot building probably deserve to be considered 'old'.
case NEW_SPACE:
- return SnapshotSpace::kOld;
+ // Large objects (young and old) are encoded simply as 'old' snapshot
+ // objects, as "normal" objects vs. large objects is a heap implementation
+ // detail and isn't relevant to the snapshot.
case NEW_LO_SPACE:
- return SnapshotSpace::kLargeObject;
-
- default:
- return static_cast<SnapshotSpace>(heap_space);
+ case LO_SPACE:
+ return SnapshotSpace::kOld;
+ case CODE_SPACE:
+ return SnapshotSpace::kCode;
+ case MAP_SPACE:
+ return SnapshotSpace::kMap;
+ case CODE_LO_SPACE:
+ case RO_SPACE:
+ UNREACHABLE();
}
}
}
} // namespace
void Serializer::ObjectSerializer::SerializeObject() {
- int size = object_.Size();
- Map map = object_.map();
+ int size = object_->Size();
+ Map map = object_->map();
+
+ // Descriptor arrays have complex element weakness that depends on the
+ // maps pointing to them. During deserialization, this can cause them to get
+ // prematurely trimmed if one of their owners isn't deserialized yet. We work
+ // around this by forcing all descriptor arrays to be serialized as "strong",
+ // i.e. no custom weakness, and "re-weaken" them in the deserializer once
+ // deserialization completes.
+ //
+ // See also `Deserializer::WeakenDescriptorArrays`.
+ if (map == ReadOnlyRoots(isolate()).descriptor_array_map()) {
+ map = ReadOnlyRoots(isolate()).strong_descriptor_array_map();
+ }
SnapshotSpace space = GetSnapshotSpace(object_);
SerializePrologue(space, size, map);
@@ -701,14 +764,13 @@ void Serializer::ObjectSerializer::SerializeObject() {
}
void Serializer::ObjectSerializer::SerializeDeferred() {
- SerializerReference back_reference =
- serializer_->reference_map()->LookupReference(
- reinterpret_cast<void*>(object_.ptr()));
+ const SerializerReference* back_reference =
+ serializer_->reference_map()->LookupReference(object_);
- if (back_reference.is_valid()) {
+ if (back_reference != nullptr) {
if (FLAG_trace_serializer) {
PrintF(" Deferred heap object ");
- object_.ShortPrint();
+ object_->ShortPrint();
PrintF(" was already serialized\n");
}
return;
@@ -721,17 +783,15 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
}
void Serializer::ObjectSerializer::SerializeContent(Map map, int size) {
- UnlinkWeakNextScope unlink_weak_next(serializer_->isolate()->heap(), object_);
- if (object_.IsCode()) {
- // For code objects, output raw bytes first.
- OutputCode(size);
- // Then iterate references via reloc info.
- object_.IterateBody(map, size, this);
+ UnlinkWeakNextScope unlink_weak_next(isolate()->heap(), object_);
+ if (object_->IsCode()) {
+ // For code objects, perform a custom serialization.
+ SerializeCode(map, size);
} else {
// For other objects, iterate references first.
- object_.IterateBody(map, size, this);
+ object_->IterateBody(map, size, this);
// Then output data payload, if any.
- OutputRawData(object_.address() + size);
+ OutputRawData(object_->address() + size);
}
}
@@ -744,6 +804,7 @@ void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
MaybeObjectSlot start,
MaybeObjectSlot end) {
+ HandleScope scope(isolate());
DisallowGarbageCollection no_gc;
MaybeObjectSlot current = start;
@@ -771,7 +832,8 @@ void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
sink_->Put(kWeakPrefix, "WeakReference");
}
- if (serializer_->SerializePendingObject(current_contents)) {
+ Handle<HeapObject> obj = handle(current_contents, isolate());
+ if (serializer_->SerializePendingObject(obj)) {
bytes_processed_so_far_ += kTaggedSize;
++current;
continue;
@@ -783,12 +845,11 @@ void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
// immortal immovable root members.
MaybeObjectSlot repeat_end = current + 1;
if (repeat_end < end &&
- serializer_->root_index_map()->Lookup(current_contents,
- &root_index) &&
+ serializer_->root_index_map()->Lookup(*obj, &root_index) &&
RootsTable::IsImmortalImmovable(root_index) &&
*current == *repeat_end) {
DCHECK_EQ(reference_type, HeapObjectReferenceType::STRONG);
- DCHECK(!Heap::InYoungGeneration(current_contents));
+ DCHECK(!Heap::InYoungGeneration(*obj));
while (repeat_end < end && *repeat_end == *current) {
repeat_end++;
}
@@ -801,18 +862,11 @@ void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
++current;
}
// Now write the object itself.
- serializer_->SerializeObject(current_contents);
+ serializer_->SerializeObject(obj);
}
}
}
-void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code host,
- RelocInfo* rinfo) {
- Object object = rinfo->target_object();
- serializer_->SerializeObject(HeapObject::cast(object));
- bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
void Serializer::ObjectSerializer::OutputExternalReference(Address target,
int target_size,
bool sandboxify) {
@@ -834,7 +888,7 @@ void Serializer::ObjectSerializer::OutputExternalReference(Address target,
// serialization and deserialization. We can serialize seen external
// references verbatim.
CHECK(serializer_->allow_unknown_external_references_for_testing());
- CHECK(IsAligned(target_size, kObjectAlignment));
+ CHECK(IsAligned(target_size, kTaggedSize));
CHECK_LE(target_size, kFixedRawDataCount * kTaggedSize);
int size_in_tagged = target_size >> kTaggedSizeLog2;
sink_->Put(FixedRawDataWithSize::Encode(size_in_tagged), "FixedRawData");
@@ -854,13 +908,56 @@ void Serializer::ObjectSerializer::OutputExternalReference(Address target,
}
sink_->PutInt(encoded_reference.index(), "reference index");
}
- bytes_processed_so_far_ += target_size;
}
void Serializer::ObjectSerializer::VisitExternalReference(Foreign host,
Address* p) {
// "Sandboxify" external reference.
OutputExternalReference(host.foreign_address(), kExternalPointerSize, true);
+ bytes_processed_so_far_ += kExternalPointerSize;
+}
+
+class Serializer::ObjectSerializer::RelocInfoObjectPreSerializer {
+ public:
+ explicit RelocInfoObjectPreSerializer(Serializer* serializer)
+ : serializer_(serializer) {}
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* target) {
+ Object object = target->target_object();
+ serializer_->SerializeObject(handle(HeapObject::cast(object), isolate()));
+ num_serialized_objects_++;
+ }
+ void VisitCodeTarget(Code host, RelocInfo* target) {
+#ifdef V8_TARGET_ARCH_ARM
+ DCHECK(!RelocInfo::IsRelativeCodeTarget(target->rmode()));
+#endif
+ Code object = Code::GetCodeFromTargetAddress(target->target_address());
+ serializer_->SerializeObject(handle(object, isolate()));
+ num_serialized_objects_++;
+ }
+
+ void VisitExternalReference(Code host, RelocInfo* rinfo) {}
+ void VisitInternalReference(Code host, RelocInfo* rinfo) {}
+ void VisitRuntimeEntry(Code host, RelocInfo* reloc) { UNREACHABLE(); }
+ void VisitOffHeapTarget(Code host, RelocInfo* target) {}
+
+ int num_serialized_objects() const { return num_serialized_objects_; }
+
+ Isolate* isolate() { return serializer_->isolate(); }
+
+ private:
+ Serializer* serializer_;
+ int num_serialized_objects_ = 0;
+};
+
+void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code host,
+ RelocInfo* rinfo) {
+ // Target object should be pre-serialized by RelocInfoObjectPreSerializer, so
+ // just track the pointer's existence as kTaggedSize in
+ // bytes_processed_so_far_.
+ // TODO(leszeks): DCHECK that RelocInfoObjectPreSerializer serialized this
+ // specific object already.
+ bytes_processed_so_far_ += kTaggedSize;
}
void Serializer::ObjectSerializer::VisitExternalReference(Code host,
@@ -875,10 +972,14 @@ void Serializer::ObjectSerializer::VisitExternalReference(Code host,
void Serializer::ObjectSerializer::VisitInternalReference(Code host,
RelocInfo* rinfo) {
- Address entry = Code::cast(object_).entry();
+ Address entry = Handle<Code>::cast(object_)->entry();
DCHECK_GE(rinfo->target_internal_reference(), entry);
uintptr_t target_offset = rinfo->target_internal_reference() - entry;
- DCHECK_LE(target_offset, Code::cast(object_).raw_instruction_size());
+ // TODO(jgruber,v8:11036): We are being permissive for this DCHECK, but
+ // consider using raw_instruction_size() instead of raw_body_size() in the
+ // future.
+ STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
+ DCHECK_LE(target_offset, Handle<Code>::cast(object_)->raw_body_size());
sink_->Put(kInternalReference, "InternalRef");
sink_->PutInt(target_offset, "internal ref value");
}
@@ -896,22 +997,21 @@ void Serializer::ObjectSerializer::VisitOffHeapTarget(Code host,
Address addr = rinfo->target_off_heap_target();
CHECK_NE(kNullAddress, addr);
- Code target = InstructionStream::TryLookupCode(serializer_->isolate(), addr);
+ Code target = InstructionStream::TryLookupCode(isolate(), addr);
CHECK(Builtins::IsIsolateIndependentBuiltin(target));
sink_->Put(kOffHeapTarget, "OffHeapTarget");
sink_->PutInt(target.builtin_index(), "builtin index");
- bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitCodeTarget(Code host,
RelocInfo* rinfo) {
-#ifdef V8_TARGET_ARCH_ARM
- DCHECK(!RelocInfo::IsRelativeCodeTarget(rinfo->rmode()));
-#endif
- Code object = Code::GetCodeFromTargetAddress(rinfo->target_address());
- serializer_->SerializeObject(object);
- bytes_processed_so_far_ += rinfo->target_address_size();
+ // Target object should be pre-serialized by RelocInfoObjectPreSerializer, so
+ // just track the pointer's existence as kTaggedSize in
+ // bytes_processed_so_far_.
+ // TODO(leszeks): DCHECK that RelocInfoObjectPreSerializer serialized this
+ // specific object already.
+ bytes_processed_so_far_ += kTaggedSize;
}
namespace {
@@ -940,35 +1040,36 @@ void OutputRawWithCustomField(SnapshotByteSink* sink, Address object_start,
} // anonymous namespace
void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
- Address object_start = object_.address();
+ Address object_start = object_->address();
int base = bytes_processed_so_far_;
int up_to_offset = static_cast<int>(up_to - object_start);
int to_skip = up_to_offset - bytes_processed_so_far_;
int bytes_to_output = to_skip;
+ DCHECK(IsAligned(bytes_to_output, kTaggedSize));
+ int tagged_to_output = bytes_to_output / kTaggedSize;
bytes_processed_so_far_ += to_skip;
DCHECK_GE(to_skip, 0);
if (bytes_to_output != 0) {
DCHECK(to_skip == bytes_to_output);
- if (IsAligned(bytes_to_output, kObjectAlignment) &&
- bytes_to_output <= kFixedRawDataCount * kTaggedSize) {
- int size_in_tagged = bytes_to_output >> kTaggedSizeLog2;
- sink_->Put(FixedRawDataWithSize::Encode(size_in_tagged), "FixedRawData");
+ if (tagged_to_output <= kFixedRawDataCount) {
+ sink_->Put(FixedRawDataWithSize::Encode(tagged_to_output),
+ "FixedRawData");
} else {
sink_->Put(kVariableRawData, "VariableRawData");
- sink_->PutInt(bytes_to_output, "length");
+ sink_->PutInt(tagged_to_output, "length");
}
#ifdef MEMORY_SANITIZER
// Check that we do not serialize uninitialized memory.
__msan_check_mem_is_initialized(
reinterpret_cast<void*>(object_start + base), bytes_to_output);
#endif // MEMORY_SANITIZER
- if (object_.IsBytecodeArray()) {
+ if (object_->IsBytecodeArray()) {
// The bytecode age field can be changed by GC concurrently.
byte field_value = BytecodeArray::kNoAgeBytecodeAge;
OutputRawWithCustomField(sink_, object_start, base, bytes_to_output,
BytecodeArray::kBytecodeAgeOffset,
sizeof(field_value), &field_value);
- } else if (object_.IsDescriptorArray()) {
+ } else if (object_->IsDescriptorArray()) {
// The number of marked descriptors field can be changed by GC
// concurrently.
byte field_value[2];
@@ -985,26 +1086,30 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
}
}
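To make the new slot-based length encoding concrete, here is a hedged standalone sketch of the fixed-vs-variable decision in OutputRawData above; kTaggedSize and kFixedRawDataCount are stand-in values here, not V8's actual constants:

#include <cstdio>

// Placeholder values for illustration only.
constexpr int kTaggedSize = 8;
constexpr int kFixedRawDataCount = 32;

void EncodeRawDataLength(int bytes_to_output) {
  // The serializer asserts tagged alignment before dividing into slots.
  int tagged_to_output = bytes_to_output / kTaggedSize;
  if (tagged_to_output <= kFixedRawDataCount) {
    std::printf("FixedRawDataWithSize(%d)\n", tagged_to_output);
  } else {
    std::printf("VariableRawData, length=%d slots\n", tagged_to_output);
  }
}

int main() {
  EncodeRawDataLength(64);    // 8 slots -> fixed-size bytecode
  EncodeRawDataLength(1024);  // 128 slots -> variable-length bytecode
}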
-void Serializer::ObjectSerializer::OutputCode(int size) {
- DCHECK_EQ(kTaggedSize, bytes_processed_so_far_);
- Code on_heap_code = Code::cast(object_);
- // To make snapshots reproducible, we make a copy of the code object
- // and wipe all pointers in the copy, which we then serialize.
- Code off_heap_code = serializer_->CopyCode(on_heap_code);
- int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+void Serializer::ObjectSerializer::SerializeCode(Map map, int size) {
+ static const int kWipeOutModeMask =
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+ DCHECK_EQ(HeapObject::kHeaderSize, bytes_processed_so_far_);
+ Handle<Code> on_heap_code = Handle<Code>::cast(object_);
+
// With pointer compression enabled, normal accessors no longer work for
// off-heap objects, so we have to get the relocation info data via the
// on-heap code object.
- ByteArray relocation_info = on_heap_code.unchecked_relocation_info();
- for (RelocIterator it(off_heap_code, relocation_info, mode_mask); !it.done();
- it.next()) {
+ ByteArray relocation_info = on_heap_code->unchecked_relocation_info();
+
+ // To make snapshots reproducible, we make a copy of the code object
+ // and wipe all pointers in the copy, which we then serialize.
+ Code off_heap_code = serializer_->CopyCode(*on_heap_code);
+ for (RelocIterator it(off_heap_code, relocation_info, kWipeOutModeMask);
+ !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
rinfo->WipeOut();
}
@@ -1012,12 +1117,18 @@ void Serializer::ObjectSerializer::OutputCode(int size) {
// relocations, because some of these fields are needed for the latter.
off_heap_code.WipeOutHeader();
+ // Initially skip serializing the code header. We'll serialize it after the
+ // Code body, so that the various fields the Code needs for iteration are
+ // already valid.
+ sink_->Put(kCodeBody, "kCodeBody");
+
+ // Now serialize the wiped off-heap Code, as length + data.
Address start = off_heap_code.address() + Code::kDataStart;
int bytes_to_output = size - Code::kDataStart;
DCHECK(IsAligned(bytes_to_output, kTaggedSize));
+ int tagged_to_output = bytes_to_output / kTaggedSize;
- sink_->Put(kVariableRawCode, "VariableRawCode");
- sink_->PutInt(bytes_to_output, "length");
+ sink_->PutInt(tagged_to_output, "length");
#ifdef MEMORY_SANITIZER
// Check that we do not serialize uninitialized memory.
@@ -1025,6 +1136,59 @@ void Serializer::ObjectSerializer::OutputCode(int size) {
bytes_to_output);
#endif // MEMORY_SANITIZER
sink_->PutRaw(reinterpret_cast<byte*>(start), bytes_to_output, "Code");
+
+ // Manually serialize the code header. We don't use Code::BodyDescriptor
+ // here as we don't yet want to walk the RelocInfos.
+ DCHECK_EQ(HeapObject::kHeaderSize, bytes_processed_so_far_);
+ VisitPointers(*on_heap_code, on_heap_code->RawField(HeapObject::kHeaderSize),
+ on_heap_code->RawField(Code::kDataStart));
+ DCHECK_EQ(bytes_processed_so_far_, Code::kDataStart);
+
+ // Now serialize RelocInfos. We can't allocate during a RelocInfo walk during
+ // deserialization, so we have two passes for RelocInfo serialization:
+ // 1. A pre-serializer which serializes all allocatable objects in the
+ // RelocInfo, followed by a kSynchronize bytecode, and
+ // 2. A walk of the RelocInfo with this serializer, serializing any objects
+ // implicitly as offsets into the pre-serializer's object array.
+ // This way, the deserializer can deserialize the allocatable objects first,
+ // without walking RelocInfo, re-build the pre-serializer's object array, and
+ // only then walk the RelocInfo itself.
+ // TODO(leszeks): We only really need to pre-serialize objects which need
+ // serialization, i.e. no backrefs or roots.
+ RelocInfoObjectPreSerializer pre_serializer(serializer_);
+ for (RelocIterator it(*on_heap_code, relocation_info,
+ Code::BodyDescriptor::kRelocModeMask);
+ !it.done(); it.next()) {
+ it.rinfo()->Visit(&pre_serializer);
+ }
+ // Mark that the pre-serialization finished with a kSynchronize bytecode.
+ sink_->Put(kSynchronize, "PreSerializationFinished");
+
+ // Finally serialize all RelocInfo objects in the on-heap Code, knowing that
+ // we will not do a recursive serialization.
+ // TODO(leszeks): Add a scope that DCHECKs this.
+ for (RelocIterator it(*on_heap_code, relocation_info,
+ Code::BodyDescriptor::kRelocModeMask);
+ !it.done(); it.next()) {
+ it.rinfo()->Visit(this);
+ }
+
+ // We record a kTaggedSize for every object encountered during the
+ // serialization, so DCHECK that bytes_processed_so_far_ matches the expected
+ // number of bytes (i.e. the code header + a tagged size per pre-serialized
+ // object).
+ DCHECK_EQ(
+ bytes_processed_so_far_,
+ Code::kDataStart + kTaggedSize * pre_serializer.num_serialized_objects());
+}
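The two-pass RelocInfo scheme described in the comments above can be pictured with ordinary containers; this is a conceptual analogue, not V8's API:

#include <cstdio>
#include <vector>

// Pass 1 records every object a reloc entry points at and emits it, then a
// sync marker; pass 2 walks the same entries again and can refer to each
// target purely by its pass-1 position, so the deserializer never needs to
// allocate while walking reloc info.
int main() {
  std::vector<const char*> reloc_targets = {"objA", "objB", "objA"};

  std::vector<const char*> pre_serialized;
  for (const char* target : reloc_targets) {
    std::printf("pre-serialize %s\n", target);
    pre_serialized.push_back(target);
  }
  std::printf("kSynchronize\n");  // marks the end of pre-serialization

  for (size_t i = 0; i < reloc_targets.size(); i++) {
    std::printf("reloc entry -> pre-serialized object #%zu\n", i);
  }
}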
+
+Serializer::HotObjectsList::HotObjectsList(Heap* heap) : heap_(heap) {
+ strong_roots_entry_ =
+ heap->RegisterStrongRoots(FullObjectSlot(&circular_queue_[0]),
+ FullObjectSlot(&circular_queue_[kSize]));
+}
+Serializer::HotObjectsList::~HotObjectsList() {
+ heap_->UnregisterStrongRoots(strong_roots_entry_);
}
} // namespace internal
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 87f02f5c42..e04d08b256 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -8,14 +8,16 @@
#include <map>
#include "src/codegen/external-reference-encoder.h"
+#include "src/common/assert-scope.h"
#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
#include "src/logging/log.h"
#include "src/objects/objects.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/snapshot/serializer-allocator.h"
#include "src/snapshot/serializer-deserializer.h"
#include "src/snapshot/snapshot-source-sink.h"
#include "src/snapshot/snapshot.h"
+#include "src/utils/identity-map.h"
namespace v8 {
namespace internal {
@@ -45,6 +47,8 @@ class CodeAddressMap : public CodeEventLogger {
class NameMap {
public:
NameMap() : impl_() {}
+ NameMap(const NameMap&) = delete;
+ NameMap& operator=(const NameMap&) = delete;
~NameMap() {
for (base::HashMap::Entry* p = impl_.Start(); p != nullptr;
@@ -112,8 +116,6 @@ class CodeAddressMap : public CodeEventLogger {
}
base::HashMap impl_;
-
- DISALLOW_COPY_AND_ASSIGN(NameMap);
};
void LogRecordedBuffer(Handle<AbstractCode> code,
@@ -132,52 +134,48 @@ class CodeAddressMap : public CodeEventLogger {
class ObjectCacheIndexMap {
public:
- ObjectCacheIndexMap() : map_(), next_index_(0) {}
+ explicit ObjectCacheIndexMap(Heap* heap) : map_(heap), next_index_(0) {}
+ ObjectCacheIndexMap(const ObjectCacheIndexMap&) = delete;
+ ObjectCacheIndexMap& operator=(const ObjectCacheIndexMap&) = delete;
// If |obj| is in the map, immediately return true. Otherwise add it to the
// map and return false. In either case set |*index_out| to the index
// associated with the map.
- bool LookupOrInsert(HeapObject obj, int* index_out) {
- Maybe<uint32_t> maybe_index = map_.Get(obj);
- if (maybe_index.IsJust()) {
- *index_out = maybe_index.FromJust();
- return true;
+ bool LookupOrInsert(Handle<HeapObject> obj, int* index_out) {
+ auto find_result = map_.FindOrInsert(obj);
+ if (!find_result.already_exists) {
+ *find_result.entry = next_index_++;
}
- *index_out = next_index_;
- map_.Set(obj, next_index_++);
- return false;
+ *index_out = *find_result.entry;
+ return find_result.already_exists;
}
private:
DisallowHeapAllocation no_allocation_;
- HeapObjectToIndexHashMap map_;
+ IdentityMap<int, base::DefaultAllocationPolicy> map_;
int next_index_;
-
- DISALLOW_COPY_AND_ASSIGN(ObjectCacheIndexMap);
};
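A hedged standalone analogue of the LookupOrInsert contract above, using std::unordered_map in place of V8's IdentityMap: the first call for a key assigns the next index and reports "not present", later calls return the same index.

#include <cassert>
#include <unordered_map>

class CacheIndexMap {
 public:
  bool LookupOrInsert(const void* obj, int* index_out) {
    auto it = map_.find(obj);
    bool already_exists = it != map_.end();
    if (!already_exists) it = map_.emplace(obj, next_index_++).first;
    *index_out = it->second;
    return already_exists;
  }

 private:
  std::unordered_map<const void*, int> map_;
  int next_index_ = 0;
};

int main() {
  CacheIndexMap map;
  int a_dummy, b_dummy, index;
  assert(!map.LookupOrInsert(&a_dummy, &index) && index == 0);  // inserted
  assert(!map.LookupOrInsert(&b_dummy, &index) && index == 1);  // inserted
  assert(map.LookupOrInsert(&a_dummy, &index) && index == 0);   // found again
}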
class Serializer : public SerializerDeserializer {
public:
Serializer(Isolate* isolate, Snapshot::SerializerFlags flags);
-
- std::vector<SerializedData::Reservation> EncodeReservations() const {
- return allocator_.EncodeReservations();
- }
+ ~Serializer() override { DCHECK_EQ(unresolved_forward_refs_, 0); }
+ Serializer(const Serializer&) = delete;
+ Serializer& operator=(const Serializer&) = delete;
const std::vector<byte>* Payload() const { return sink_.data(); }
- bool ReferenceMapContains(HeapObject o) {
- return reference_map()
- ->LookupReference(reinterpret_cast<void*>(o.ptr()))
- .is_valid();
+ bool ReferenceMapContains(Handle<HeapObject> o) {
+ return reference_map()->LookupReference(o) != nullptr;
}
Isolate* isolate() const { return isolate_; }
+ int TotalAllocationSize() const;
+
protected:
- using PendingObjectReference =
- std::map<HeapObject, std::vector<int>>::iterator;
+ using PendingObjectReferences = std::vector<int>*;
class ObjectSerializer;
class RecursionScope {
@@ -196,7 +194,8 @@ class Serializer : public SerializerDeserializer {
};
void SerializeDeferredObjects();
- virtual void SerializeObject(HeapObject o) = 0;
+ void SerializeObject(Handle<HeapObject> o);
+ virtual void SerializeObjectImpl(Handle<HeapObject> o) = 0;
virtual bool MustBeDeferred(HeapObject object);
@@ -204,36 +203,35 @@ class Serializer : public SerializerDeserializer {
FullObjectSlot start, FullObjectSlot end) override;
void SerializeRootObject(FullObjectSlot slot);
- void PutRoot(RootIndex root_index, HeapObject object);
+ void PutRoot(RootIndex root_index);
void PutSmiRoot(FullObjectSlot slot);
- void PutBackReference(HeapObject object, SerializerReference reference);
+ void PutBackReference(Handle<HeapObject> object,
+ SerializerReference reference);
void PutAttachedReference(SerializerReference reference);
- // Emit alignment prefix if necessary, return required padding space in bytes.
- int PutAlignmentPrefix(HeapObject object);
void PutNextChunk(SnapshotSpace space);
void PutRepeat(int repeat_count);
// Emit a marker noting that this slot is a forward reference to an
// object which has not yet been serialized.
- void PutPendingForwardReferenceTo(PendingObjectReference reference);
+ void PutPendingForwardReference(PendingObjectReferences& ref);
// Resolve the given previously registered forward reference to the current
// object.
void ResolvePendingForwardReference(int obj);
// Returns true if the object was successfully serialized as a root.
- bool SerializeRoot(HeapObject obj);
+ bool SerializeRoot(Handle<HeapObject> obj);
// Returns true if the object was successfully serialized as hot object.
- bool SerializeHotObject(HeapObject obj);
+ bool SerializeHotObject(Handle<HeapObject> obj);
// Returns true if the object was successfully serialized as back reference.
- bool SerializeBackReference(HeapObject obj);
+ bool SerializeBackReference(Handle<HeapObject> obj);
// Returns true if the object was successfully serialized as pending object.
- bool SerializePendingObject(HeapObject obj);
+ bool SerializePendingObject(Handle<HeapObject> obj);
// Returns true if the given heap object is a bytecode handler code object.
- bool ObjectIsBytecodeHandler(HeapObject obj) const;
+ bool ObjectIsBytecodeHandler(Handle<HeapObject> obj) const;
ExternalReferenceEncoder::Value EncodeExternalReference(Address addr) {
return external_reference_encoder_.Encode(addr);
@@ -253,36 +251,32 @@ class Serializer : public SerializerDeserializer {
Code CopyCode(Code code);
- void QueueDeferredObject(HeapObject obj) {
- DCHECK(!reference_map_.LookupReference(reinterpret_cast<void*>(obj.ptr()))
- .is_valid());
- deferred_objects_.push_back(obj);
+ void QueueDeferredObject(Handle<HeapObject> obj) {
+ DCHECK_NULL(reference_map_.LookupReference(obj));
+ deferred_objects_.Push(*obj);
}
// Register that the given object shouldn't be immediately serialized, but
// will be serialized later and any references to it should be pending forward
// references.
- PendingObjectReference RegisterObjectIsPending(HeapObject obj);
+ void RegisterObjectIsPending(Handle<HeapObject> obj);
// Resolve the given pending object reference with the current object.
- void ResolvePendingObject(PendingObjectReference ref);
+ void ResolvePendingObject(Handle<HeapObject> obj);
void OutputStatistics(const char* name);
-#ifdef OBJECT_PRINT
- void CountInstanceType(Map map, int size, SnapshotSpace space);
-#endif // OBJECT_PRINT
+ void CountAllocation(Map map, int size, SnapshotSpace space);
#ifdef DEBUG
- void PushStack(HeapObject o) { stack_.push_back(o); }
- void PopStack() { stack_.pop_back(); }
+ void PushStack(Handle<HeapObject> o) { stack_.Push(*o); }
+ void PopStack() { stack_.Pop(); }
void PrintStack();
void PrintStack(std::ostream&);
#endif // DEBUG
SerializerReferenceMap* reference_map() { return &reference_map_; }
const RootIndexMap* root_index_map() const { return &root_index_map_; }
- SerializerAllocator* allocator() { return &allocator_; }
SnapshotByteSink sink_; // Used directly by subclasses.
@@ -294,17 +288,62 @@ class Serializer : public SerializerDeserializer {
}
private:
+ // A circular queue of hot objects. Objects are added in the same order as in
+ // Deserializer::HotObjectsList, but this stores the objects as an array of
+ // raw addresses that are considered strong roots. This allows objects to be
+ // added to the list without having to extend their handle's lifetime.
+ //
+ // We should never allow this class to return Handles to objects in the queue,
+ // as the object in the queue may change if kSize other objects are added to
+ // the queue during that Handle's lifetime.
+ class HotObjectsList {
+ public:
+ explicit HotObjectsList(Heap* heap);
+ ~HotObjectsList();
+ HotObjectsList(const HotObjectsList&) = delete;
+ HotObjectsList& operator=(const HotObjectsList&) = delete;
+
+ void Add(HeapObject object) {
+ circular_queue_[index_] = object.ptr();
+ index_ = (index_ + 1) & kSizeMask;
+ }
+
+ static const int kNotFound = -1;
+
+ int Find(HeapObject object) {
+ DCHECK(!AllowGarbageCollection::IsAllowed());
+ for (int i = 0; i < kSize; i++) {
+ if (circular_queue_[i] == object.ptr()) {
+ return i;
+ }
+ }
+ return kNotFound;
+ }
+
+ private:
+ static const int kSize = kHotObjectCount;
+ static const int kSizeMask = kSize - 1;
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kSize));
+ Heap* heap_;
+ StrongRootsEntry* strong_roots_entry_;
+ Address circular_queue_[kSize] = {kNullAddress};
+ int index_ = 0;
+ };
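The hot-objects queue above can be pictured as a plain power-of-two ring buffer; in this standalone sketch kSize = 8 is only a placeholder for V8's kHotObjectCount, and raw integers stand in for object addresses.

#include <cassert>
#include <cstdint>

class HotList {
 public:
  static constexpr int kSize = 8;  // must stay a power of two for the mask
  static constexpr int kSizeMask = kSize - 1;
  static constexpr int kNotFound = -1;

  void Add(uintptr_t address) {
    queue_[index_] = address;
    index_ = (index_ + 1) & kSizeMask;  // wrap around instead of growing
  }

  int Find(uintptr_t address) const {
    for (int i = 0; i < kSize; i++) {
      if (queue_[i] == address) return i;
    }
    return kNotFound;
  }

 private:
  uintptr_t queue_[kSize] = {0};
  int index_ = 0;
};

int main() {
  HotList hot;
  hot.Add(0x1000);
  assert(hot.Find(0x1000) == 0);
  for (int i = 0; i < HotList::kSize; i++) hot.Add(0x2000 + i);
  assert(hot.Find(0x1000) == HotList::kNotFound);  // evicted after kSize adds
}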
+
// Disallow GC during serialization.
// TODO(leszeks, v8:10815): Remove this constraint.
- DisallowHeapAllocation no_gc;
+ DISALLOW_HEAP_ALLOCATION(no_gc)
Isolate* isolate_;
+ HotObjectsList hot_objects_;
SerializerReferenceMap reference_map_;
ExternalReferenceEncoder external_reference_encoder_;
RootIndexMap root_index_map_;
std::unique_ptr<CodeAddressMap> code_address_map_;
std::vector<byte> code_buffer_;
- std::vector<HeapObject> deferred_objects_; // To handle stack overflow.
+ GlobalHandleVector<HeapObject>
+ deferred_objects_; // To handle stack overflow.
+ int num_back_refs_ = 0;
// Objects which have started being serialized, but haven't yet been allocated
// with the allocator, are considered "pending". References to them don't have
@@ -319,34 +358,40 @@ class Serializer : public SerializerDeserializer {
// forward refs remaining.
int next_forward_ref_id_ = 0;
int unresolved_forward_refs_ = 0;
- std::map<HeapObject, std::vector<int>> forward_refs_per_pending_object_;
+ IdentityMap<PendingObjectReferences, base::DefaultAllocationPolicy>
+ forward_refs_per_pending_object_;
+
+ // Used to keep track of the off-heap backing stores used by TypedArrays/
+ // ArrayBuffers. Note that the index begins at 1 and not 0, because when a
+ // TypedArray has an on-heap backing store, the backing_store pointer in the
+ // corresponding ArrayBuffer will be null, which makes it indistinguishable
+ // from index 0.
+ uint32_t seen_backing_stores_index_ = 1;
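A standalone sketch of why the backing-store index begins at 1, per the comment above; the registry type and Register() function are illustrative only, not part of the serializer.

#include <cassert>
#include <cstdint>
#include <map>

class BackingStoreRegistry {
 public:
  uint32_t Register(void* backing_store) {
    if (backing_store == nullptr) return 0;  // reserved: no off-heap store
    auto it = indices_.find(backing_store);
    if (it != indices_.end()) return it->second;
    return indices_[backing_store] = next_index_++;
  }

 private:
  std::map<void*, uint32_t> indices_;
  uint32_t next_index_ = 1;  // index 0 is never handed out
};

int main() {
  BackingStoreRegistry registry;
  int store;
  assert(registry.Register(nullptr) == 0);  // on-heap / empty case
  assert(registry.Register(&store) == 1);   // first real store gets index 1
  assert(registry.Register(&store) == 1);   // same store, same index
}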
int recursion_depth_ = 0;
const Snapshot::SerializerFlags flags_;
- SerializerAllocator allocator_;
+ size_t allocation_size_[kNumberOfSnapshotSpaces] = {0};
#ifdef OBJECT_PRINT
static constexpr int kInstanceTypes = LAST_TYPE + 1;
- std::unique_ptr<int[]> instance_type_count_[kNumberOfSpaces];
- std::unique_ptr<size_t[]> instance_type_size_[kNumberOfSpaces];
+ std::unique_ptr<int[]> instance_type_count_[kNumberOfSnapshotSpaces];
+ std::unique_ptr<size_t[]> instance_type_size_[kNumberOfSnapshotSpaces];
#endif // OBJECT_PRINT
#ifdef DEBUG
- std::vector<HeapObject> stack_;
+ GlobalHandleVector<HeapObject> back_refs_;
+ GlobalHandleVector<HeapObject> stack_;
#endif // DEBUG
-
- friend class SerializerAllocator;
-
- DISALLOW_COPY_AND_ASSIGN(Serializer);
};
class RelocInfoIterator;
class Serializer::ObjectSerializer : public ObjectVisitor {
public:
- ObjectSerializer(Serializer* serializer, HeapObject obj,
+ ObjectSerializer(Serializer* serializer, Handle<HeapObject> obj,
SnapshotByteSink* sink)
- : serializer_(serializer),
+ : isolate_(serializer->isolate()),
+ serializer_(serializer),
object_(obj),
sink_(sink),
bytes_processed_so_far_(0) {
@@ -375,7 +420,11 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
void VisitRuntimeEntry(Code host, RelocInfo* reloc) override;
void VisitOffHeapTarget(Code host, RelocInfo* target) override;
+ Isolate* isolate() { return isolate_; }
+
private:
+ class RelocInfoObjectPreSerializer;
+
void SerializePrologue(SnapshotSpace space, int size, Map map);
// This function outputs or skips the raw data between the last pointer and
@@ -384,15 +433,16 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
void OutputExternalReference(Address target, int target_size,
bool sandboxify);
void OutputRawData(Address up_to);
- void OutputCode(int size);
+ void SerializeCode(Map map, int size);
uint32_t SerializeBackingStore(void* backing_store, int32_t byte_length);
void SerializeJSTypedArray();
void SerializeJSArrayBuffer();
void SerializeExternalString();
void SerializeExternalStringAsSequentialString();
+ Isolate* isolate_;
Serializer* serializer_;
- HeapObject object_;
+ Handle<HeapObject> object_;
SnapshotByteSink* sink_;
int bytes_processed_so_far_;
};
diff --git a/deps/v8/src/snapshot/snapshot-data.cc b/deps/v8/src/snapshot/snapshot-data.cc
index 870945cdce..0a5bbaaf2a 100644
--- a/deps/v8/src/snapshot/snapshot-data.cc
+++ b/deps/v8/src/snapshot/snapshot-data.cc
@@ -26,51 +26,28 @@ constexpr uint32_t SerializedData::kMagicNumber;
SnapshotData::SnapshotData(const Serializer* serializer) {
DisallowGarbageCollection no_gc;
- std::vector<Reservation> reservations = serializer->EncodeReservations();
const std::vector<byte>* payload = serializer->Payload();
// Calculate sizes.
- uint32_t reservation_size =
- static_cast<uint32_t>(reservations.size()) * kUInt32Size;
- uint32_t payload_offset = kHeaderSize + reservation_size;
- uint32_t padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
- uint32_t size =
- padded_payload_offset + static_cast<uint32_t>(payload->size());
+ uint32_t size = kHeaderSize + static_cast<uint32_t>(payload->size());
// Allocate backing store and create result data.
AllocateData(size);
// Zero out pre-payload data. Part of that is only used for padding.
- memset(data_, 0, padded_payload_offset);
+ memset(data_, 0, kHeaderSize);
// Set header values.
SetMagicNumber();
- SetHeaderValue(kNumReservationsOffset, static_cast<int>(reservations.size()));
SetHeaderValue(kPayloadLengthOffset, static_cast<int>(payload->size()));
- // Copy reservation chunk sizes.
- CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.data()),
- reservation_size);
-
// Copy serialized data.
- CopyBytes(data_ + padded_payload_offset, payload->data(),
+ CopyBytes(data_ + kHeaderSize, payload->data(),
static_cast<size_t>(payload->size()));
}
-std::vector<SerializedData::Reservation> SnapshotData::Reservations() const {
- uint32_t size = GetHeaderValue(kNumReservationsOffset);
- std::vector<SerializedData::Reservation> reservations(size);
- memcpy(reservations.data(), data_ + kHeaderSize,
- size * sizeof(SerializedData::Reservation));
- return reservations;
-}
-
Vector<const byte> SnapshotData::Payload() const {
- uint32_t reservations_size =
- GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
- uint32_t padded_payload_offset =
- POINTER_SIZE_ALIGN(kHeaderSize + reservations_size);
- const byte* payload = data_ + padded_payload_offset;
+ const byte* payload = data_ + kHeaderSize;
uint32_t length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + length);
return Vector<const byte>(payload, length);
diff --git a/deps/v8/src/snapshot/snapshot-data.h b/deps/v8/src/snapshot/snapshot-data.h
index b8a9133e7f..a7d6872bc6 100644
--- a/deps/v8/src/snapshot/snapshot-data.h
+++ b/deps/v8/src/snapshot/snapshot-data.h
@@ -20,21 +20,6 @@ class Serializer;
class SerializedData {
public:
- class Reservation {
- public:
- Reservation() : reservation_(0) {}
- explicit Reservation(uint32_t size)
- : reservation_(ChunkSizeBits::encode(size)) {}
-
- uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation_); }
- bool is_last() const { return IsLastChunkBits::decode(reservation_); }
-
- void mark_as_last() { reservation_ |= IsLastChunkBits::encode(true); }
-
- private:
- uint32_t reservation_;
- };
-
SerializedData(byte* data, int size)
: data_(data), size_(size), owns_data_(false) {}
SerializedData() : data_(nullptr), size_(0), owns_data_(false) {}
@@ -45,6 +30,8 @@ class SerializedData {
// Ensure |other| will not attempt to destroy our data in destructor.
other.owns_data_ = false;
}
+ SerializedData(const SerializedData&) = delete;
+ SerializedData& operator=(const SerializedData&) = delete;
virtual ~SerializedData() {
if (owns_data_) DeleteArray<byte>(data_);
@@ -77,9 +64,6 @@ class SerializedData {
byte* data_;
uint32_t size_;
bool owns_data_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SerializedData);
};
// Wrapper around reservation sizes and the serialization payload.
@@ -93,7 +77,6 @@ class V8_EXPORT_PRIVATE SnapshotData : public SerializedData {
: SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
}
- std::vector<Reservation> Reservations() const;
virtual Vector<const byte> Payload() const;
Vector<const byte> RawData() const {
@@ -112,14 +95,9 @@ class V8_EXPORT_PRIVATE SnapshotData : public SerializedData {
// The data header consists of uint32_t-sized entries:
// [0] magic number and (internal) external reference count
- // [1] number of reservation size entries
- // [2] payload length
- // ... reservations
+ // [1] payload length
// ... serialized payload
- static const uint32_t kNumReservationsOffset =
- kMagicNumberOffset + kUInt32Size;
- static const uint32_t kPayloadLengthOffset =
- kNumReservationsOffset + kUInt32Size;
+ static const uint32_t kPayloadLengthOffset = kMagicNumberOffset + kUInt32Size;
static const uint32_t kHeaderSize = kPayloadLengthOffset + kUInt32Size;
};
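Assuming kUInt32Size == 4 and kMagicNumberOffset == 0 (assumptions, not stated in this hunk), the simplified header layout above works out as follows:

#include <cstdint>

// Illustrative recomputation of the two-entry header: magic number, then
// payload length, then the payload itself.
constexpr uint32_t kUInt32Size = 4;           // assumption
constexpr uint32_t kMagicNumberOffset = 0;    // assumption
constexpr uint32_t kPayloadLengthOffset = kMagicNumberOffset + kUInt32Size;  // 4
constexpr uint32_t kHeaderSize = kPayloadLengthOffset + kUInt32Size;         // 8
static_assert(kHeaderSize == 8, "payload now starts right after two uint32s");

int main() { return 0; }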
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index 9d44678679..f0686af3c0 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -7,7 +7,9 @@
#include <utility>
+#include "src/base/atomicops.h"
#include "src/base/logging.h"
+#include "src/common/globals.h"
#include "src/snapshot/snapshot-utils.h"
#include "src/utils/utils.h"
@@ -31,6 +33,8 @@ class SnapshotByteSource final {
: data_(payload.begin()), length_(payload.length()), position_(0) {}
~SnapshotByteSource() = default;
+ SnapshotByteSource(const SnapshotByteSource&) = delete;
+ SnapshotByteSource& operator=(const SnapshotByteSource&) = delete;
bool HasMore() { return position_ < length_; }
@@ -51,6 +55,30 @@ class SnapshotByteSource final {
position_ += number_of_bytes;
}
+ void CopySlots(Address* dest, int number_of_slots) {
+ base::AtomicWord* start = reinterpret_cast<base::AtomicWord*>(dest);
+ base::AtomicWord* end = start + number_of_slots;
+ for (base::AtomicWord* p = start; p < end;
+ ++p, position_ += sizeof(base::AtomicWord)) {
+ base::AtomicWord val;
+ memcpy(&val, data_ + position_, sizeof(base::AtomicWord));
+ base::Relaxed_Store(p, val);
+ }
+ }
+
+#ifdef V8_COMPRESS_POINTERS
+ void CopySlots(Tagged_t* dest, int number_of_slots) {
+ AtomicTagged_t* start = reinterpret_cast<AtomicTagged_t*>(dest);
+ AtomicTagged_t* end = start + number_of_slots;
+ for (AtomicTagged_t* p = start; p < end;
+ ++p, position_ += sizeof(AtomicTagged_t)) {
+ AtomicTagged_t val;
+ memcpy(&val, data_ + position_, sizeof(AtomicTagged_t));
+ base::Relaxed_Store(p, val);
+ }
+ }
+#endif
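A hedged standalone sketch of the relaxed slot copy above, with std::atomic standing in for base::Relaxed_Store; the concurrency rationale in the comment is an assumption, not stated in the diff.

#include <atomic>
#include <cstdint>
#include <cstring>

// Each pointer-sized slot is published with a relaxed atomic store rather
// than a plain memcpy, presumably so a concurrent reader of the heap sees
// either the old or the new word, never a torn value.
void CopySlots(std::atomic<uintptr_t>* dest, const uint8_t* src, int slots) {
  for (int i = 0; i < slots; i++) {
    uintptr_t value;
    std::memcpy(&value, src + i * sizeof(uintptr_t), sizeof(uintptr_t));
    dest[i].store(value, std::memory_order_relaxed);
  }
}

int main() {
  std::atomic<uintptr_t> dest[2];
  uintptr_t src[2] = {0x1111, 0x2222};
  CopySlots(dest, reinterpret_cast<const uint8_t*>(src), 2);
  return dest[1].load(std::memory_order_relaxed) == 0x2222 ? 0 : 1;
}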
+
inline int GetInt() {
// This way of decoding variable-length encoded integers does not
// suffer from branch mispredictions.
@@ -82,8 +110,6 @@ class SnapshotByteSource final {
const byte* data_;
int length_;
int position_;
-
- DISALLOW_COPY_AND_ASSIGN(SnapshotByteSource);
};
/**
diff --git a/deps/v8/src/snapshot/snapshot-utils.cc b/deps/v8/src/snapshot/snapshot-utils.cc
index 319b828446..eb2372372c 100644
--- a/deps/v8/src/snapshot/snapshot-utils.cc
+++ b/deps/v8/src/snapshot/snapshot-utils.cc
@@ -21,20 +21,5 @@ uint32_t Checksum(Vector<const byte> payload) {
return static_cast<uint32_t>(adler32(0, payload.begin(), payload.length()));
}
-V8_EXPORT_PRIVATE uint32_t Checksum(Vector<const byte> payload1,
- Vector<const byte> payload2) {
-#ifdef MEMORY_SANITIZER
- // Computing the checksum includes padding bytes for objects like strings.
- // Mark every object as initialized in the code serializer.
- MSAN_MEMORY_IS_INITIALIZED(payload1.begin(), payload1.length());
- MSAN_MEMORY_IS_INITIALIZED(payload2.begin(), payload2.length());
-#endif // MEMORY_SANITIZER
- // Priming the adler32 call so it can see what CPU features are available.
- adler32(0, nullptr, 0);
- auto sum = adler32(0, payload1.begin(), payload1.length());
- sum = adler32(sum, payload2.begin(), payload2.length());
- return static_cast<uint32_t>(sum);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/snapshot-utils.h b/deps/v8/src/snapshot/snapshot-utils.h
index 284bbcd4a5..045813b139 100644
--- a/deps/v8/src/snapshot/snapshot-utils.h
+++ b/deps/v8/src/snapshot/snapshot-utils.h
@@ -11,8 +11,6 @@ namespace v8 {
namespace internal {
V8_EXPORT_PRIVATE uint32_t Checksum(Vector<const byte> payload);
-V8_EXPORT_PRIVATE uint32_t Checksum(Vector<const byte> payload1,
- Vector<const byte> payload2);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/snapshot.cc b/deps/v8/src/snapshot/snapshot.cc
index 7e3f072659..86d0544667 100644
--- a/deps/v8/src/snapshot/snapshot.cc
+++ b/deps/v8/src/snapshot/snapshot.cc
@@ -158,12 +158,9 @@ bool Snapshot::Initialize(Isolate* isolate) {
SnapshotData startup_snapshot_data(MaybeDecompress(startup_data));
SnapshotData read_only_snapshot_data(MaybeDecompress(read_only_data));
- StartupDeserializer startup_deserializer(&startup_snapshot_data);
- ReadOnlyDeserializer read_only_deserializer(&read_only_snapshot_data);
- startup_deserializer.SetRehashability(ExtractRehashability(blob));
- read_only_deserializer.SetRehashability(ExtractRehashability(blob));
- bool success =
- isolate->InitWithSnapshot(&read_only_deserializer, &startup_deserializer);
+ bool success = isolate->InitWithSnapshot(&startup_snapshot_data,
+ &read_only_snapshot_data,
+ ExtractRehashability(blob));
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
int bytes = startup_data.length();
@@ -317,30 +314,6 @@ void Snapshot::SerializeDeserializeAndVerifyForTesting(
Isolate::Delete(new_isolate);
}
-void ProfileDeserialization(
- const SnapshotData* read_only_snapshot,
- const SnapshotData* startup_snapshot,
- const std::vector<SnapshotData*>& context_snapshots) {
- if (FLAG_profile_deserialization) {
- int startup_total = 0;
- PrintF("Deserialization will reserve:\n");
- for (const auto& reservation : read_only_snapshot->Reservations()) {
- startup_total += reservation.chunk_size();
- }
- for (const auto& reservation : startup_snapshot->Reservations()) {
- startup_total += reservation.chunk_size();
- }
- PrintF("%10d bytes per isolate\n", startup_total);
- for (size_t i = 0; i < context_snapshots.size(); i++) {
- int context_total = 0;
- for (const auto& reservation : context_snapshots[i]->Reservations()) {
- context_total += reservation.chunk_size();
- }
- PrintF("%10d bytes per context #%zu\n", context_total, i);
- }
- }
-}
-
// static
constexpr Snapshot::SerializerFlags Snapshot::kDefaultSerializerFlags;
@@ -352,6 +325,7 @@ v8::StartupData Snapshot::Create(
const DisallowGarbageCollection& no_gc, SerializerFlags flags) {
DCHECK_EQ(contexts->size(), embedder_fields_serializers.size());
DCHECK_GT(contexts->size(), 0);
+ HandleScope scope(isolate);
// Enter a safepoint so that the heap is safe to iterate.
// TODO(leszeks): This safepoint's scope could be tightened to just string
@@ -374,12 +348,17 @@ v8::StartupData Snapshot::Create(
// TODO(v8:6593): generalize rehashing, and remove this flag.
bool can_be_rehashed = true;
+ std::vector<int> context_allocation_sizes;
for (int i = 0; i < num_contexts; i++) {
ContextSerializer context_serializer(isolate, flags, &startup_serializer,
embedder_fields_serializers[i]);
context_serializer.Serialize(&contexts->at(i), no_gc);
can_be_rehashed = can_be_rehashed && context_serializer.can_be_rehashed();
context_snapshots.push_back(new SnapshotData(&context_serializer));
+ if (FLAG_profile_deserialization) {
+ context_allocation_sizes.push_back(
+ context_serializer.TotalAllocationSize());
+ }
}
startup_serializer.SerializeWeakReferencesAndDeferred();
@@ -390,6 +369,17 @@ v8::StartupData Snapshot::Create(
read_only_serializer.FinalizeSerialization();
can_be_rehashed = can_be_rehashed && read_only_serializer.can_be_rehashed();
+ if (FLAG_profile_deserialization) {
+ // These prints should match the regexp in test/memory/Memory.json
+ PrintF("Deserialization will allocate:\n");
+ PrintF("%10d bytes per isolate\n",
+ read_only_serializer.TotalAllocationSize() +
+ startup_serializer.TotalAllocationSize());
+ for (int i = 0; i < num_contexts; i++) {
+ PrintF("%10d bytes per context #%d\n", context_allocation_sizes[i], i);
+ }
+ }
+
SnapshotData read_only_snapshot(&read_only_serializer);
SnapshotData startup_snapshot(&startup_serializer);
v8::StartupData result =
@@ -454,9 +444,6 @@ v8::StartupData SnapshotImpl::CreateSnapshotBlob(
total_length += static_cast<uint32_t>(context_snapshot->RawData().length());
}
- ProfileDeserialization(read_only_snapshot_in, startup_snapshot_in,
- context_snapshots_in);
-
char* data = new char[total_length];
// Zero out pre-payload data. Part of that is only used for padding.
memset(data, 0, SnapshotImpl::StartupSnapshotOffset(num_contexts));
@@ -480,9 +467,8 @@ v8::StartupData SnapshotImpl::CreateSnapshotBlob(
reinterpret_cast<const char*>(startup_snapshot->RawData().begin()),
payload_length);
if (FLAG_profile_deserialization) {
- PrintF("Snapshot blob consists of:\n%10d bytes in %d chunks for startup\n",
- payload_length,
- static_cast<uint32_t>(startup_snapshot_in->Reservations().size()));
+ PrintF("Snapshot blob consists of:\n%10d bytes for startup\n",
+ payload_length);
}
payload_offset += payload_length;
@@ -510,10 +496,7 @@ v8::StartupData SnapshotImpl::CreateSnapshotBlob(
reinterpret_cast<const char*>(context_snapshot->RawData().begin()),
payload_length);
if (FLAG_profile_deserialization) {
- PrintF(
- "%10d bytes in %d chunks for context #%d\n", payload_length,
- static_cast<uint32_t>(context_snapshots_in[i]->Reservations().size()),
- i);
+ PrintF("%10d bytes for context #%d\n", payload_length, i);
}
payload_offset += payload_length;
}
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index 3288aff509..b019091ee9 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -14,36 +14,35 @@
namespace v8 {
namespace internal {
-void StartupDeserializer::DeserializeInto(Isolate* isolate) {
- Initialize(isolate);
-
- if (!allocator()->ReserveSpace()) {
- V8::FatalProcessOutOfMemory(isolate, "StartupDeserializer");
- }
+void StartupDeserializer::DeserializeIntoIsolate() {
+ HandleScope scope(isolate());
// No active threads.
- DCHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
+ DCHECK_NULL(isolate()->thread_manager()->FirstThreadStateInUse());
// No active handles.
- DCHECK(isolate->handle_scope_implementer()->blocks()->empty());
+ DCHECK(isolate()->handle_scope_implementer()->blocks()->empty());
// Startup object cache is not yet populated.
- DCHECK(isolate->startup_object_cache()->empty());
+ DCHECK(isolate()->startup_object_cache()->empty());
// Builtins are not yet created.
- DCHECK(!isolate->builtins()->is_initialized());
+ DCHECK(!isolate()->builtins()->is_initialized());
{
- DisallowGarbageCollection no_gc;
- isolate->heap()->IterateSmiRoots(this);
- isolate->heap()->IterateRoots(
+ isolate()->heap()->IterateSmiRoots(this);
+ isolate()->heap()->IterateRoots(
this,
base::EnumSet<SkipRoot>{SkipRoot::kUnserializable, SkipRoot::kWeak});
- Iterate(isolate, this);
+ Iterate(isolate(), this);
DeserializeStringTable();
- isolate->heap()->IterateWeakRoots(
+ isolate()->heap()->IterateWeakRoots(
this, base::EnumSet<SkipRoot>{SkipRoot::kUnserializable});
DeserializeDeferredObjects();
- RestoreExternalReferenceRedirectors(isolate, accessor_infos());
- RestoreExternalReferenceRedirectors(isolate, call_handler_infos());
+ for (Handle<AccessorInfo> info : accessor_infos()) {
+ RestoreExternalReferenceRedirector(isolate(), info);
+ }
+ for (Handle<CallHandlerInfo> info : call_handler_infos()) {
+ RestoreExternalReferenceRedirector(isolate(), info);
+ }
// Flush the instruction cache for the entire code-space. Must happen after
// builtins deserialization.
@@ -52,22 +51,23 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
CheckNoArrayBufferBackingStores();
- isolate->heap()->set_native_contexts_list(
- ReadOnlyRoots(isolate).undefined_value());
+ isolate()->heap()->set_native_contexts_list(
+ ReadOnlyRoots(isolate()).undefined_value());
// The allocation site list is built during root iteration, but if no sites
// were encountered then it needs to be initialized to undefined.
- if (isolate->heap()->allocation_sites_list() == Smi::zero()) {
- isolate->heap()->set_allocation_sites_list(
- ReadOnlyRoots(isolate).undefined_value());
+ if (isolate()->heap()->allocation_sites_list() == Smi::zero()) {
+ isolate()->heap()->set_allocation_sites_list(
+ ReadOnlyRoots(isolate()).undefined_value());
}
- isolate->heap()->set_dirty_js_finalization_registries_list(
- ReadOnlyRoots(isolate).undefined_value());
- isolate->heap()->set_dirty_js_finalization_registries_list_tail(
- ReadOnlyRoots(isolate).undefined_value());
+ isolate()->heap()->set_dirty_js_finalization_registries_list(
+ ReadOnlyRoots(isolate()).undefined_value());
+ isolate()->heap()->set_dirty_js_finalization_registries_list_tail(
+ ReadOnlyRoots(isolate()).undefined_value());
- isolate->builtins()->MarkInitialized();
+ isolate()->builtins()->MarkInitialized();
LogNewMapEvents();
+ WeakenDescriptorArrays();
if (FLAG_rehash_snapshot && can_rehash()) {
// Hash seed was initialized in ReadOnlyDeserializer.
@@ -84,16 +84,15 @@ void StartupDeserializer::DeserializeStringTable() {
// Add each string to the Isolate's string table.
// TODO(leszeks): Consider pre-sizing the string table.
for (int i = 0; i < string_table_size; ++i) {
- String string = String::cast(ReadObject());
- Address handle_storage = string.ptr();
- Handle<String> handle(&handle_storage);
- StringTableInsertionKey key(handle);
- String result = *isolate()->string_table()->LookupKey(isolate(), &key);
+ Handle<String> string = Handle<String>::cast(ReadObject());
+ StringTableInsertionKey key(string);
+ Handle<String> result =
+ isolate()->string_table()->LookupKey(isolate(), &key);
USE(result);
// This is startup, so there should be no duplicate entries in the string
// table, and the lookup should unconditionally add the given string.
- DCHECK_EQ(result, string);
+ DCHECK_EQ(*result, *string);
}
DCHECK_EQ(string_table_size, isolate()->string_table()->NumberOfElements());
diff --git a/deps/v8/src/snapshot/startup-deserializer.h b/deps/v8/src/snapshot/startup-deserializer.h
index 59533de8de..f744efc193 100644
--- a/deps/v8/src/snapshot/startup-deserializer.h
+++ b/deps/v8/src/snapshot/startup-deserializer.h
@@ -6,6 +6,7 @@
#define V8_SNAPSHOT_STARTUP_DESERIALIZER_H_
#include "src/snapshot/deserializer.h"
+#include "src/snapshot/snapshot-data.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -14,11 +15,14 @@ namespace internal {
// Initializes an isolate with context-independent data from a given snapshot.
class StartupDeserializer final : public Deserializer {
public:
- explicit StartupDeserializer(const SnapshotData* startup_data)
- : Deserializer(startup_data, false) {}
+ explicit StartupDeserializer(Isolate* isolate,
+ const SnapshotData* startup_data,
+ bool can_rehash)
+ : Deserializer(isolate, startup_data->Payload(),
+ startup_data->GetMagicNumber(), false, can_rehash) {}
// Deserialize the snapshot into an empty heap.
- void DeserializeInto(Isolate* isolate);
+ void DeserializeIntoIsolate();
private:
void DeserializeStringTable();
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 8606f6a019..88d3c77c66 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -66,14 +66,19 @@ StartupSerializer::StartupSerializer(Isolate* isolate,
Snapshot::SerializerFlags flags,
ReadOnlySerializer* read_only_serializer)
: RootsSerializer(isolate, flags, RootIndex::kFirstStrongRoot),
- read_only_serializer_(read_only_serializer) {
- allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
+ read_only_serializer_(read_only_serializer),
+ accessor_infos_(isolate->heap()),
+ call_handler_infos_(isolate->heap()) {
InitializeCodeAddressMap();
}
StartupSerializer::~StartupSerializer() {
- RestoreExternalReferenceRedirectors(isolate(), accessor_infos_);
- RestoreExternalReferenceRedirectors(isolate(), call_handler_infos_);
+ for (Handle<AccessorInfo> info : accessor_infos_) {
+ RestoreExternalReferenceRedirector(isolate(), info);
+ }
+ for (Handle<CallHandlerInfo> info : call_handler_infos_) {
+ RestoreExternalReferenceRedirector(isolate(), info);
+ }
OutputStatistics("StartupSerializer");
}
@@ -84,12 +89,6 @@ bool IsUnexpectedCodeObject(Isolate* isolate, HeapObject obj) {
if (!obj.IsCode()) return false;
Code code = Code::cast(obj);
-
- // TODO(v8:8768): Deopt entry code should not be serialized.
- if (code.kind() == CodeKind::STUB && isolate->deoptimizer_data() != nullptr) {
- if (isolate->deoptimizer_data()->IsDeoptEntryCode(code)) return false;
- }
-
if (code.kind() == CodeKind::REGEXP) return false;
if (!code.is_builtin()) return true;
if (code.is_off_heap_trampoline()) return false;
@@ -114,21 +113,21 @@ bool IsUnexpectedCodeObject(Isolate* isolate, HeapObject obj) {
} // namespace
#endif // DEBUG
-void StartupSerializer::SerializeObject(HeapObject obj) {
+void StartupSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
#ifdef DEBUG
- if (obj.IsJSFunction()) {
+ if (obj->IsJSFunction()) {
v8::base::OS::PrintError("Reference stack:\n");
PrintStack(std::cerr);
- obj.Print(std::cerr);
+ obj->Print(std::cerr);
FATAL(
"JSFunction should be added through the context snapshot instead of "
"the isolate snapshot");
}
#endif // DEBUG
- DCHECK(!IsUnexpectedCodeObject(isolate(), obj));
+ DCHECK(!IsUnexpectedCodeObject(isolate(), *obj));
if (SerializeHotObject(obj)) return;
- if (IsRootAndHasBeenSerialized(obj) && SerializeRoot(obj)) return;
+ if (IsRootAndHasBeenSerialized(*obj) && SerializeRoot(obj)) return;
if (SerializeUsingReadOnlyObjectCache(&sink_, obj)) return;
if (SerializeBackReference(obj)) return;
@@ -137,37 +136,37 @@ void StartupSerializer::SerializeObject(HeapObject obj) {
use_simulator = true;
#endif
- if (use_simulator && obj.IsAccessorInfo()) {
+ if (use_simulator && obj->IsAccessorInfo()) {
// Wipe external reference redirects in the accessor info.
- AccessorInfo info = AccessorInfo::cast(obj);
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(obj);
Address original_address =
- Foreign::cast(info.getter()).foreign_address(isolate());
- Foreign::cast(info.js_getter())
+ Foreign::cast(info->getter()).foreign_address(isolate());
+ Foreign::cast(info->js_getter())
.set_foreign_address(isolate(), original_address);
- accessor_infos_.push_back(info);
- } else if (use_simulator && obj.IsCallHandlerInfo()) {
- CallHandlerInfo info = CallHandlerInfo::cast(obj);
+ accessor_infos_.Push(*info);
+ } else if (use_simulator && obj->IsCallHandlerInfo()) {
+ Handle<CallHandlerInfo> info = Handle<CallHandlerInfo>::cast(obj);
Address original_address =
- Foreign::cast(info.callback()).foreign_address(isolate());
- Foreign::cast(info.js_callback())
+ Foreign::cast(info->callback()).foreign_address(isolate());
+ Foreign::cast(info->js_callback())
.set_foreign_address(isolate(), original_address);
- call_handler_infos_.push_back(info);
- } else if (obj.IsScript() && Script::cast(obj).IsUserJavaScript()) {
- Script::cast(obj).set_context_data(
+ call_handler_infos_.Push(*info);
+ } else if (obj->IsScript() && Handle<Script>::cast(obj)->IsUserJavaScript()) {
+ Handle<Script>::cast(obj)->set_context_data(
ReadOnlyRoots(isolate()).uninitialized_symbol());
- } else if (obj.IsSharedFunctionInfo()) {
+ } else if (obj->IsSharedFunctionInfo()) {
// Clear inferred name for native functions.
- SharedFunctionInfo shared = SharedFunctionInfo::cast(obj);
- if (!shared.IsSubjectToDebugging() && shared.HasUncompiledData()) {
- shared.uncompiled_data().set_inferred_name(
+ Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(obj);
+ if (!shared->IsSubjectToDebugging() && shared->HasUncompiledData()) {
+ shared->uncompiled_data().set_inferred_name(
ReadOnlyRoots(isolate()).empty_string());
}
}
- CheckRehashability(obj);
+ CheckRehashability(*obj);
// Object has not yet been serialized. Serialize it here.
- DCHECK(!ReadOnlyHeap::Contains(obj));
+ DCHECK(!ReadOnlyHeap::Contains(*obj));
ObjectSerializer object_serializer(this, obj, &sink_);
object_serializer.Serialize();
}
@@ -225,7 +224,7 @@ void StartupSerializer::SerializeStringTable(StringTable* string_table) {
Object obj = current.load(isolate);
if (obj.IsHeapObject()) {
DCHECK(obj.IsInternalizedString());
- serializer_->SerializeObject(HeapObject::cast(obj));
+ serializer_->SerializeObject(handle(HeapObject::cast(obj), isolate));
}
}
}
@@ -243,9 +242,6 @@ void StartupSerializer::SerializeStrongReferences(
Isolate* isolate = this->isolate();
// No active threads.
CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
- // No active or weak handles.
- CHECK_IMPLIES(!allow_active_isolate_for_testing(),
- isolate->handle_scope_implementer()->blocks()->empty());
SanitizeIsolateScope sanitize_isolate(
isolate, allow_active_isolate_for_testing(), no_gc);
@@ -268,12 +264,12 @@ SerializedHandleChecker::SerializedHandleChecker(Isolate* isolate,
}
bool StartupSerializer::SerializeUsingReadOnlyObjectCache(
- SnapshotByteSink* sink, HeapObject obj) {
+ SnapshotByteSink* sink, Handle<HeapObject> obj) {
return read_only_serializer_->SerializeUsingReadOnlyObjectCache(sink, obj);
}
-void StartupSerializer::SerializeUsingStartupObjectCache(SnapshotByteSink* sink,
- HeapObject obj) {
+void StartupSerializer::SerializeUsingStartupObjectCache(
+ SnapshotByteSink* sink, Handle<HeapObject> obj) {
int cache_index = SerializeInObjectCache(obj);
sink->Put(kStartupObjectCache, "StartupObjectCache");
sink->PutInt(cache_index, "startup_object_cache_index");
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index d13d5d224e..ba4b44b2ff 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -7,6 +7,7 @@
#include <unordered_set>
+#include "src/handles/global-handles.h"
#include "src/snapshot/roots-serializer.h"
namespace v8 {
@@ -21,6 +22,8 @@ class V8_EXPORT_PRIVATE StartupSerializer : public RootsSerializer {
StartupSerializer(Isolate* isolate, Snapshot::SerializerFlags flags,
ReadOnlySerializer* read_only_serializer);
~StartupSerializer() override;
+ StartupSerializer(const StartupSerializer&) = delete;
+ StartupSerializer& operator=(const StartupSerializer&) = delete;
// Serialize the current state of the heap. The order is:
// 1) Strong roots
@@ -35,25 +38,24 @@ class V8_EXPORT_PRIVATE StartupSerializer : public RootsSerializer {
// ReadOnlyObjectCache bytecode into |sink|. Returns whether this was
// successful.
bool SerializeUsingReadOnlyObjectCache(SnapshotByteSink* sink,
- HeapObject obj);
+ Handle<HeapObject> obj);
// Adds |obj| to the startup object cache if not already present and
// emits a StartupObjectCache bytecode into |sink|.
- void SerializeUsingStartupObjectCache(SnapshotByteSink* sink, HeapObject obj);
+ void SerializeUsingStartupObjectCache(SnapshotByteSink* sink,
+ Handle<HeapObject> obj);
// The per-heap dirty FinalizationRegistry list is weak and not serialized. No
// JSFinalizationRegistries should be used during startup.
void CheckNoDirtyFinalizationRegistries();
private:
- void SerializeObject(HeapObject o) override;
+ void SerializeObjectImpl(Handle<HeapObject> o) override;
void SerializeStringTable(StringTable* string_table);
ReadOnlySerializer* read_only_serializer_;
- std::vector<AccessorInfo> accessor_infos_;
- std::vector<CallHandlerInfo> call_handler_infos_;
-
- DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
+ GlobalHandleVector<AccessorInfo> accessor_infos_;
+ GlobalHandleVector<CallHandlerInfo> call_handler_infos_;
};
class SerializedHandleChecker : public RootVisitor {
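For reference, the DISALLOW_COPY_AND_ASSIGN macro removed above is roughly equivalent to spelling out the deleted special members directly in the class body, which is the style this patch switches to. A minimal sketch (hypothetical class, not part of the patch):

class NonCopyable {
 public:
  NonCopyable() = default;
  // Explicitly deleted copy operations replace the old macro expansion.
  NonCopyable(const NonCopyable&) = delete;
  NonCopyable& operator=(const NonCopyable&) = delete;
};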
diff --git a/deps/v8/src/strings/DIR_METADATA b/deps/v8/src/strings/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/strings/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+  component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/strings/OWNERS b/deps/v8/src/strings/OWNERS
index 3c29ae29e2..ac020e24a9 100644
--- a/deps/v8/src/strings/OWNERS
+++ b/deps/v8/src/strings/OWNERS
@@ -2,5 +2,3 @@ bmeurer@chromium.org
jkummerow@chromium.org
leszeks@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/strings/char-predicates-inl.h b/deps/v8/src/strings/char-predicates-inl.h
index 2dc7e50925..4c43172ff4 100644
--- a/deps/v8/src/strings/char-predicates-inl.h
+++ b/deps/v8/src/strings/char-predicates-inl.h
@@ -74,64 +74,102 @@ inline constexpr bool IsRegExpWord(uc32 c) {
}
// Constexpr cache table for character flags.
-enum AsciiCharFlags {
+enum OneByteCharFlags {
kIsIdentifierStart = 1 << 0,
kIsIdentifierPart = 1 << 1,
kIsWhiteSpace = 1 << 2,
- kIsWhiteSpaceOrLineTerminator = 1 << 3
+ kIsWhiteSpaceOrLineTerminator = 1 << 3,
+ kMaybeLineEnd = 1 << 4
};
-constexpr uint8_t BuildAsciiCharFlags(uc32 c) {
- return ((IsAsciiIdentifier(c) || c == '\\')
- ? (kIsIdentifierPart |
- (!IsDecimalDigit(c) ? kIsIdentifierStart : 0))
- : 0) |
- ((c == ' ' || c == '\t' || c == '\v' || c == '\f')
- ? kIsWhiteSpace | kIsWhiteSpaceOrLineTerminator
- : 0) |
- ((c == '\r' || c == '\n') ? kIsWhiteSpaceOrLineTerminator : 0);
-}
-const constexpr uint8_t kAsciiCharFlags[128] = {
-#define BUILD_CHAR_FLAGS(N) BuildAsciiCharFlags(N),
+
+// See http://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt
+// ID_Start. Additionally includes '_' and '$'.
+constexpr bool IsOneByteIDStart(uc32 c) {
+ return c == 0x0024 || (c >= 0x0041 && c <= 0x005A) || c == 0x005F ||
+ (c >= 0x0061 && c <= 0x007A) || c == 0x00AA || c == 0x00B5 ||
+ c == 0x00BA || (c >= 0x00C0 && c <= 0x00D6) ||
+ (c >= 0x00D8 && c <= 0x00F6) || (c >= 0x00F8 && c <= 0x00FF);
+}
+
+// See http://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt
+// ID_Continue. Additionally includes '_' and '$'.
+constexpr bool IsOneByteIDContinue(uc32 c) {
+ return c == 0x0024 || (c >= 0x0030 && c <= 0x0039) || c == 0x005F ||
+ (c >= 0x0041 && c <= 0x005A) || (c >= 0x0061 && c <= 0x007A) ||
+ c == 0x00AA || c == 0x00B5 || c == 0x00B7 || c == 0x00BA ||
+ (c >= 0x00C0 && c <= 0x00D6) || (c >= 0x00D8 && c <= 0x00F6) ||
+ (c >= 0x00F8 && c <= 0x00FF);
+}
+
+constexpr bool IsOneByteWhitespace(uc32 c) {
+ return c == '\t' || c == '\v' || c == '\f' || c == ' ' || c == u'\xa0';
+}
+
+constexpr uint8_t BuildOneByteCharFlags(uc32 c) {
+ uint8_t result = 0;
+ if (IsOneByteIDStart(c) || c == '\\') result |= kIsIdentifierStart;
+ if (IsOneByteIDContinue(c) || c == '\\') result |= kIsIdentifierPart;
+ if (IsOneByteWhitespace(c)) {
+ result |= kIsWhiteSpace | kIsWhiteSpaceOrLineTerminator;
+ }
+ if (c == '\r' || c == '\n') {
+ result |= kIsWhiteSpaceOrLineTerminator | kMaybeLineEnd;
+ }
+ // Add markers to identify 0x2028 and 0x2029.
+ if (c == static_cast<uint8_t>(0x2028) || c == static_cast<uint8_t>(0x2029)) {
+ result |= kMaybeLineEnd;
+ }
+ return result;
+}
+const constexpr uint8_t kOneByteCharFlags[256] = {
+#define BUILD_CHAR_FLAGS(N) BuildOneByteCharFlags(N),
INT_0_TO_127_LIST(BUILD_CHAR_FLAGS)
#undef BUILD_CHAR_FLAGS
+#define BUILD_CHAR_FLAGS(N) BuildOneByteCharFlags(N + 128),
+ INT_0_TO_127_LIST(BUILD_CHAR_FLAGS)
+#undef BUILD_CHAR_FLAGS
};
bool IsIdentifierStart(uc32 c) {
- if (!base::IsInRange(c, 0, 127)) return IsIdentifierStartSlow(c);
+ if (!base::IsInRange(c, 0, 255)) return IsIdentifierStartSlow(c);
DCHECK_EQ(IsIdentifierStartSlow(c),
- static_cast<bool>(kAsciiCharFlags[c] & kIsIdentifierStart));
- return kAsciiCharFlags[c] & kIsIdentifierStart;
+ static_cast<bool>(kOneByteCharFlags[c] & kIsIdentifierStart));
+ return kOneByteCharFlags[c] & kIsIdentifierStart;
}
bool IsIdentifierPart(uc32 c) {
- if (!base::IsInRange(c, 0, 127)) return IsIdentifierPartSlow(c);
+ if (!base::IsInRange(c, 0, 255)) return IsIdentifierPartSlow(c);
DCHECK_EQ(IsIdentifierPartSlow(c),
- static_cast<bool>(kAsciiCharFlags[c] & kIsIdentifierPart));
- return kAsciiCharFlags[c] & kIsIdentifierPart;
+ static_cast<bool>(kOneByteCharFlags[c] & kIsIdentifierPart));
+ return kOneByteCharFlags[c] & kIsIdentifierPart;
}
bool IsWhiteSpace(uc32 c) {
- if (!base::IsInRange(c, 0, 127)) return IsWhiteSpaceSlow(c);
+ if (!base::IsInRange(c, 0, 255)) return IsWhiteSpaceSlow(c);
DCHECK_EQ(IsWhiteSpaceSlow(c),
- static_cast<bool>(kAsciiCharFlags[c] & kIsWhiteSpace));
- return kAsciiCharFlags[c] & kIsWhiteSpace;
+ static_cast<bool>(kOneByteCharFlags[c] & kIsWhiteSpace));
+ return kOneByteCharFlags[c] & kIsWhiteSpace;
}
bool IsWhiteSpaceOrLineTerminator(uc32 c) {
- if (!base::IsInRange(c, 0, 127)) return IsWhiteSpaceOrLineTerminatorSlow(c);
+ if (!base::IsInRange(c, 0, 255)) return IsWhiteSpaceOrLineTerminatorSlow(c);
DCHECK_EQ(
IsWhiteSpaceOrLineTerminatorSlow(c),
- static_cast<bool>(kAsciiCharFlags[c] & kIsWhiteSpaceOrLineTerminator));
- return kAsciiCharFlags[c] & kIsWhiteSpaceOrLineTerminator;
+ static_cast<bool>(kOneByteCharFlags[c] & kIsWhiteSpaceOrLineTerminator));
+ return kOneByteCharFlags[c] & kIsWhiteSpaceOrLineTerminator;
}
bool IsLineTerminatorSequence(uc32 c, uc32 next) {
- if (!unibrow::IsLineTerminator(c)) return false;
- if (c == 0x000d && next == 0x000a) return false; // CR with following LF.
- return true;
+ if (kOneByteCharFlags[static_cast<uint8_t>(c)] & kMaybeLineEnd) {
+ if (c == '\n') return true;
+ if (c == '\r') return next != '\n';
+ return base::IsInRange(static_cast<unsigned int>(c), 0x2028u, 0x2029u);
+ }
+ return false;
}
} // namespace internal
+
} // namespace v8
#endif // V8_STRINGS_CHAR_PREDICATES_INL_H_
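The table above is built entirely at compile time and indexed by the full one-byte range, so the hot predicates reduce to a single array load. A minimal standalone sketch of the same pattern, with hypothetical flags rather than V8's, is:

#include <array>
#include <cstdint>
#include <utility>

enum HypotheticalCharFlags : uint8_t { kIsSpace = 1 << 0, kIsDigit = 1 << 1 };

constexpr uint8_t BuildFlags(int c) {
  uint8_t r = 0;
  if (c == ' ' || c == '\t' || c == '\v' || c == '\f') r |= kIsSpace;
  if (c >= '0' && c <= '9') r |= kIsDigit;
  return r;
}

template <std::size_t... Is>
constexpr std::array<uint8_t, sizeof...(Is)> MakeTable(
    std::index_sequence<Is...>) {
  return {{BuildFlags(static_cast<int>(Is))...}};
}

// One entry per one-byte value; queries become a table load plus a mask.
constexpr auto kFlags = MakeTable(std::make_index_sequence<256>{});

inline bool IsHypotheticalDigit(int c) {
  return c >= 0 && c <= 255 && (kFlags[c] & kIsDigit) != 0;
}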
diff --git a/deps/v8/src/strings/string-stream.cc b/deps/v8/src/strings/string-stream.cc
index 5747f66bba..bcde4d7951 100644
--- a/deps/v8/src/strings/string-stream.cc
+++ b/deps/v8/src/strings/string-stream.cc
@@ -298,7 +298,7 @@ void StringStream::PrintName(Object name) {
void StringStream::PrintUsingMap(JSObject js_object) {
Map map = js_object.map();
- DescriptorArray descs = map.instance_descriptors();
+ DescriptorArray descs = map.instance_descriptors(kRelaxedLoad);
for (InternalIndex i : map.IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
if (details.location() == kField) {
diff --git a/deps/v8/src/strings/unicode-inl.h b/deps/v8/src/strings/unicode-inl.h
index 6f730b26be..0539f76264 100644
--- a/deps/v8/src/strings/unicode-inl.h
+++ b/deps/v8/src/strings/unicode-inl.h
@@ -59,6 +59,25 @@ int Mapping<T, s>::CalculateValue(uchar c, uchar n, uchar* result) {
}
#endif // !V8_INTL_SUPPORT
+bool Utf16::HasUnpairedSurrogate(const uint16_t* code_units, size_t length) {
+ for (size_t i = 0; i < length; ++i) {
+ const int code_unit = code_units[i];
+ if (IsLeadSurrogate(code_unit)) {
+ // The current code unit is a leading surrogate. Check if it is followed
+ // by a trailing surrogate.
+ if (i == length - 1) return true;
+ if (!IsTrailSurrogate(code_units[i + 1])) return true;
+ // Skip the paired trailing surrogate.
+ ++i;
+ } else if (IsTrailSurrogate(code_unit)) {
+ // All paired trailing surrogates are skipped above, so this branch is
+ // only for those that are unpaired.
+ return true;
+ }
+ }
+ return false;
+}
+
// Decodes UTF-8 bytes incrementally, allowing the decoding of bytes as they
// stream in. This **must** be followed by a call to ValueOfIncrementalFinish
// when the stream is complete, to ensure incomplete sequences are handled.
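The unpaired-surrogate scan added above is easiest to check against a few edge cases. The following self-contained re-statement mirrors its logic for illustration only; it is not the V8 implementation:

#include <cassert>
#include <cstdint>
#include <initializer_list>

bool HasUnpaired(std::initializer_list<uint16_t> units) {
  auto is_lead = [](uint16_t u) { return u >= 0xD800 && u <= 0xDBFF; };
  auto is_trail = [](uint16_t u) { return u >= 0xDC00 && u <= 0xDFFF; };
  for (auto it = units.begin(); it != units.end(); ++it) {
    if (is_lead(*it)) {
      if (it + 1 == units.end() || !is_trail(*(it + 1))) return true;
      ++it;  // Skip the paired trail surrogate.
    } else if (is_trail(*it)) {
      return true;  // Trail surrogate without a preceding lead.
    }
  }
  return false;
}

int main() {
  assert(!HasUnpaired({0xD83D, 0xDE00}));  // Well-formed pair (U+1F600).
  assert(HasUnpaired({0xD83D, 0x0041}));   // Lead not followed by a trail.
  assert(HasUnpaired({0xDE00}));           // Stray trail surrogate.
  assert(HasUnpaired({0xD83D}));           // Lead at end of input.
}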
diff --git a/deps/v8/src/strings/unicode.h b/deps/v8/src/strings/unicode.h
index a050a27dc9..616ab1c6a9 100644
--- a/deps/v8/src/strings/unicode.h
+++ b/deps/v8/src/strings/unicode.h
@@ -128,6 +128,8 @@ class Utf16 {
static inline uint16_t TrailSurrogate(uint32_t char_code) {
return 0xdc00 + (char_code & 0x3ff);
}
+ static inline bool HasUnpairedSurrogate(const uint16_t* code_units,
+ size_t length);
};
class Latin1 {
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index e2efc8c9fd..a51535d392 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -47,6 +47,7 @@ namespace torque {
#define AST_TYPE_EXPRESSION_NODE_KIND_LIST(V) \
V(BasicTypeExpression) \
V(FunctionTypeExpression) \
+ V(PrecomputedTypeExpression) \
V(UnionTypeExpression)
#define AST_STATEMENT_NODE_KIND_LIST(V) \
@@ -651,6 +652,17 @@ struct FunctionTypeExpression : TypeExpression {
TypeExpression* return_type;
};
+// A PrecomputedTypeExpression is never created directly by the parser. Later
+// stages can use this to insert AST snippets where the type has already been
+// resolved.
+class Type;
+struct PrecomputedTypeExpression : TypeExpression {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(PrecomputedTypeExpression)
+ PrecomputedTypeExpression(SourcePosition pos, const Type* type)
+ : TypeExpression(kKind, pos), type(type) {}
+ const Type* type;
+};
+
struct UnionTypeExpression : TypeExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(UnionTypeExpression)
UnionTypeExpression(SourcePosition pos, TypeExpression* a, TypeExpression* b)
@@ -843,16 +855,22 @@ struct InstanceTypeConstraints {
struct AbstractTypeDeclaration : TypeDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(AbstractTypeDeclaration)
- AbstractTypeDeclaration(SourcePosition pos, Identifier* name, bool transient,
+ AbstractTypeDeclaration(SourcePosition pos, Identifier* name,
+ AbstractTypeFlags flags,
base::Optional<TypeExpression*> extends,
base::Optional<std::string> generates)
: TypeDeclaration(kKind, pos, name),
- is_constexpr(IsConstexprName(name->value)),
- transient(transient),
+ flags(flags),
extends(extends),
- generates(std::move(generates)) {}
- bool is_constexpr;
- bool transient;
+ generates(std::move(generates)) {
+ CHECK_EQ(IsConstexprName(name->value),
+ !!(flags & AbstractTypeFlag::kConstexpr));
+ }
+
+ bool IsConstexpr() const { return flags & AbstractTypeFlag::kConstexpr; }
+ bool IsTransient() const { return flags & AbstractTypeFlag::kTransient; }
+
+ AbstractTypeFlags flags;
base::Optional<TypeExpression*> extends;
base::Optional<std::string> generates;
};
@@ -1237,6 +1255,58 @@ T* MakeNode(Args... args) {
std::make_unique<T>(CurrentSourcePosition::Get(), std::move(args)...));
}
+inline FieldAccessExpression* MakeFieldAccessExpression(Expression* object,
+ std::string field) {
+ return MakeNode<FieldAccessExpression>(
+ object, MakeNode<Identifier>(std::move(field)));
+}
+
+inline IdentifierExpression* MakeIdentifierExpression(
+ std::vector<std::string> namespace_qualification, std::string name,
+ std::vector<TypeExpression*> args = {}) {
+ return MakeNode<IdentifierExpression>(std::move(namespace_qualification),
+ MakeNode<Identifier>(std::move(name)),
+ std::move(args));
+}
+
+inline IdentifierExpression* MakeIdentifierExpression(std::string name) {
+ return MakeIdentifierExpression({}, std::move(name));
+}
+
+inline CallExpression* MakeCallExpression(
+ IdentifierExpression* callee, std::vector<Expression*> arguments,
+ std::vector<Identifier*> labels = {}) {
+ return MakeNode<CallExpression>(callee, std::move(arguments),
+ std::move(labels));
+}
+
+inline CallExpression* MakeCallExpression(
+ std::string callee, std::vector<Expression*> arguments,
+ std::vector<Identifier*> labels = {}) {
+ return MakeCallExpression(MakeIdentifierExpression(std::move(callee)),
+ std::move(arguments), std::move(labels));
+}
+
+inline VarDeclarationStatement* MakeConstDeclarationStatement(
+ std::string name, Expression* initializer) {
+ return MakeNode<VarDeclarationStatement>(
+ /*const_qualified=*/true, MakeNode<Identifier>(std::move(name)),
+ base::Optional<TypeExpression*>{}, initializer);
+}
+
+inline BasicTypeExpression* MakeBasicTypeExpression(
+ std::vector<std::string> namespace_qualification, std::string name,
+ std::vector<TypeExpression*> generic_arguments = {}) {
+ return MakeNode<BasicTypeExpression>(std::move(namespace_qualification),
+ std::move(name),
+ std::move(generic_arguments));
+}
+
+inline StructExpression* MakeStructExpression(
+ TypeExpression* type, std::vector<NameAndExpression> initializers) {
+ return MakeNode<StructExpression>(type, std::move(initializers));
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
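The new Make* helpers are intended to be composed by later compiler stages when they synthesize AST snippets. A hedged sketch of such a composition, using invented names ("receiver", "field", "LoadFoo", "tmp") and assuming an active CurrentSourcePosition scope as MakeNode requires:

// Builds an AST fragment roughly equivalent to the Torque source:
//   const tmp = LoadFoo(receiver.field);
Statement* BuildExampleFragment() {
  Expression* receiver = MakeIdentifierExpression("receiver");
  Expression* access = MakeFieldAccessExpression(receiver, "field");
  Expression* call = MakeCallExpression("LoadFoo", {access});
  return MakeConstDeclarationStatement("tmp", call);
}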
diff --git a/deps/v8/src/torque/cc-generator.cc b/deps/v8/src/torque/cc-generator.cc
new file mode 100644
index 0000000000..53170817a1
--- /dev/null
+++ b/deps/v8/src/torque/cc-generator.cc
@@ -0,0 +1,460 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/cc-generator.h"
+
+#include "src/common/globals.h"
+#include "src/torque/global-context.h"
+#include "src/torque/type-oracle.h"
+#include "src/torque/types.h"
+#include "src/torque/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+base::Optional<Stack<std::string>> CCGenerator::EmitGraph(
+ Stack<std::string> parameters) {
+ for (BottomOffset i = {0}; i < parameters.AboveTop(); ++i) {
+ SetDefinitionVariable(DefinitionLocation::Parameter(i.offset),
+ parameters.Peek(i));
+ }
+
+ // C++ doesn't have parameterized labels like CSA, so we must pre-declare all
+ // phi values so they're in scope for both the blocks that define them and the
+ // blocks that read them.
+ for (Block* block : cfg_.blocks()) {
+ if (block->IsDead()) continue;
+
+ DCHECK_EQ(block->InputTypes().Size(), block->InputDefinitions().Size());
+ for (BottomOffset i = {0}; i < block->InputTypes().AboveTop(); ++i) {
+ DefinitionLocation input_def = block->InputDefinitions().Peek(i);
+ if (block->InputDefinitions().Peek(i).IsPhiFromBlock(block)) {
+ out() << " " << block->InputTypes().Peek(i)->GetRuntimeType() << " "
+ << DefinitionToVariable(input_def) << ";\n";
+ }
+ }
+ }
+
+ // Redirect the output of non-declarations into a buffer and only output
+ // declarations right away.
+ std::stringstream out_buffer;
+ std::ostream* old_out = out_;
+ out_ = &out_buffer;
+
+ EmitInstruction(GotoInstruction{cfg_.start()}, &parameters);
+
+ for (Block* block : cfg_.blocks()) {
+ if (cfg_.end() && *cfg_.end() == block) continue;
+ if (block->IsDead()) continue;
+ EmitBlock(block);
+ }
+
+ base::Optional<Stack<std::string>> result;
+ if (cfg_.end()) {
+ result = EmitBlock(*cfg_.end());
+ }
+
+ // All declarations have been printed now, so we can append the buffered
+ // output and redirect back to the original output stream.
+ out_ = old_out;
+ out() << out_buffer.str();
+
+ return result;
+}
+
+Stack<std::string> CCGenerator::EmitBlock(const Block* block) {
+ out() << "\n";
+ out() << " " << BlockName(block) << ":\n";
+
+ Stack<std::string> stack;
+
+ for (BottomOffset i = {0}; i < block->InputTypes().AboveTop(); ++i) {
+ const auto& def = block->InputDefinitions().Peek(i);
+ stack.Push(DefinitionToVariable(def));
+ if (def.IsPhiFromBlock(block)) {
+ decls() << " " << block->InputTypes().Peek(i)->GetRuntimeType() << " "
+ << stack.Top() << "{}; USE(" << stack.Top() << ");\n";
+ }
+ }
+
+ for (const Instruction& instruction : block->instructions()) {
+ TorqueCodeGenerator::EmitInstruction(instruction, &stack);
+ }
+ return stack;
+}
+
+void CCGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
+ const std::string& file = SourceFileMap::AbsolutePath(pos.source);
+ if (always_emit || !previous_position_.CompareStartIgnoreColumn(pos)) {
+ // Lines in Torque SourcePositions are zero-based, while the
+ // CodeStubAssembler and downwind systems are one-based.
+ out() << " // " << file << ":" << (pos.start.line + 1) << "\n";
+ previous_position_ = pos;
+ }
+}
+
+void CCGenerator::EmitInstruction(
+ const PushUninitializedInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: PushUninitialized");
+}
+
+void CCGenerator::EmitInstruction(
+ const PushBuiltinPointerInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: PushBuiltinPointer");
+}
+
+void CCGenerator::EmitInstruction(
+ const NamespaceConstantInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: NamespaceConstantInstruction");
+}
+
+std::vector<std::string> CCGenerator::ProcessArgumentsCommon(
+ const TypeVector& parameter_types,
+ std::vector<std::string> constexpr_arguments, Stack<std::string>* stack) {
+ std::vector<std::string> args;
+ for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
+ const Type* type = *it;
+ VisitResult arg;
+ if (type->IsConstexpr()) {
+ args.push_back(std::move(constexpr_arguments.back()));
+ constexpr_arguments.pop_back();
+ } else {
+ std::stringstream s;
+ size_t slot_count = LoweredSlotCount(type);
+ VisitResult arg = VisitResult(type, stack->TopRange(slot_count));
+ EmitCCValue(arg, *stack, s);
+ args.push_back(s.str());
+ stack->PopMany(slot_count);
+ }
+ }
+ std::reverse(args.begin(), args.end());
+ return args;
+}
+
+void CCGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
+ Stack<std::string>* stack) {
+ TypeVector parameter_types =
+ instruction.intrinsic->signature().parameter_types.types;
+ std::vector<std::string> args = ProcessArgumentsCommon(
+ parameter_types, instruction.constexpr_arguments, stack);
+
+ Stack<std::string> pre_call_stack = *stack;
+ const Type* return_type = instruction.intrinsic->signature().return_type;
+ std::vector<std::string> results;
+
+ const auto lowered = LowerType(return_type);
+ for (std::size_t i = 0; i < lowered.size(); ++i) {
+ results.push_back(DefinitionToVariable(instruction.GetValueDefinition(i)));
+ stack->Push(results.back());
+ decls() << " " << lowered[i]->GetRuntimeType() << " " << stack->Top()
+ << "{}; USE(" << stack->Top() << ");\n";
+ }
+
+ out() << " ";
+ if (return_type->StructSupertype()) {
+ out() << "std::tie(";
+ PrintCommaSeparatedList(out(), results);
+ out() << ") = ";
+ } else {
+ if (results.size() == 1) {
+ out() << results[0] << " = ";
+ }
+ }
+
+ if (instruction.intrinsic->ExternalName() == "%RawDownCast") {
+ if (parameter_types.size() != 1) {
+ ReportError("%RawDownCast must take a single parameter");
+ }
+ const Type* original_type = parameter_types[0];
+ bool is_subtype =
+ return_type->IsSubtypeOf(original_type) ||
+ (original_type == TypeOracle::GetUninitializedHeapObjectType() &&
+ return_type->IsSubtypeOf(TypeOracle::GetHeapObjectType()));
+ if (!is_subtype) {
+ ReportError("%RawDownCast error: ", *return_type, " is not a subtype of ",
+ *original_type);
+ }
+ if (!original_type->StructSupertype() &&
+ return_type->GetRuntimeType() != original_type->GetRuntimeType()) {
+ out() << "static_cast<" << return_type->GetRuntimeType() << ">";
+ }
+ } else if (instruction.intrinsic->ExternalName() == "%GetClassMapConstant") {
+ ReportError("C++ generator doesn't yet support %GetClassMapConstant");
+ } else if (instruction.intrinsic->ExternalName() == "%FromConstexpr") {
+ if (parameter_types.size() != 1 || !parameter_types[0]->IsConstexpr()) {
+ ReportError(
+ "%FromConstexpr must take a single parameter with constexpr "
+ "type");
+ }
+ if (return_type->IsConstexpr()) {
+ ReportError("%FromConstexpr must return a non-constexpr type");
+ }
+ // Nothing to do here; constexpr expressions are already valid C++.
+ } else {
+ ReportError("no built in intrinsic with name " +
+ instruction.intrinsic->ExternalName());
+ }
+
+ out() << "(";
+ PrintCommaSeparatedList(out(), args);
+ out() << ");\n";
+}
+
+void CCGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
+ Stack<std::string>* stack) {
+ TypeVector parameter_types =
+ instruction.macro->signature().parameter_types.types;
+ std::vector<std::string> args = ProcessArgumentsCommon(
+ parameter_types, instruction.constexpr_arguments, stack);
+
+ Stack<std::string> pre_call_stack = *stack;
+ const Type* return_type = instruction.macro->signature().return_type;
+ std::vector<std::string> results;
+
+ const auto lowered = LowerType(return_type);
+ for (std::size_t i = 0; i < lowered.size(); ++i) {
+ results.push_back(DefinitionToVariable(instruction.GetValueDefinition(i)));
+ stack->Push(results.back());
+ decls() << " " << lowered[i]->GetRuntimeType() << " " << stack->Top()
+ << "{}; USE(" << stack->Top() << ");\n";
+ }
+
+ // We should have inlined any calls requiring complex control flow.
+ CHECK(!instruction.catch_block);
+ out() << " ";
+ if (return_type->StructSupertype().has_value()) {
+ out() << "std::tie(";
+ PrintCommaSeparatedList(out(), results);
+ out() << ") = ";
+ } else {
+ if (results.size() == 1) {
+ out() << results[0] << " = ";
+ } else {
+ DCHECK_EQ(0, results.size());
+ }
+ }
+
+ out() << instruction.macro->CCName() << "(isolate";
+ if (!args.empty()) out() << ", ";
+ PrintCommaSeparatedList(out(), args);
+ out() << ");\n";
+}
+
+void CCGenerator::EmitInstruction(
+ const CallCsaMacroAndBranchInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: CallCsaMacroAndBranch");
+}
+
+void CCGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: CallBuiltin");
+}
+
+void CCGenerator::EmitInstruction(
+ const CallBuiltinPointerInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: CallBuiltinPointer");
+}
+
+void CCGenerator::EmitInstruction(const CallRuntimeInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: CallRuntime");
+}
+
+void CCGenerator::EmitInstruction(const BranchInstruction& instruction,
+ Stack<std::string>* stack) {
+ out() << " if (" << stack->Pop() << ") {\n";
+ EmitGoto(instruction.if_true, stack, " ");
+ out() << " } else {\n";
+ EmitGoto(instruction.if_false, stack, " ");
+ out() << " }\n";
+}
+
+void CCGenerator::EmitInstruction(const ConstexprBranchInstruction& instruction,
+ Stack<std::string>* stack) {
+ out() << " if ((" << instruction.condition << ")) {\n";
+ EmitGoto(instruction.if_true, stack, " ");
+ out() << " } else {\n";
+ EmitGoto(instruction.if_false, stack, " ");
+ out() << " }\n";
+}
+
+void CCGenerator::EmitGoto(const Block* destination, Stack<std::string>* stack,
+ std::string indentation) {
+ const auto& destination_definitions = destination->InputDefinitions();
+ DCHECK_EQ(stack->Size(), destination_definitions.Size());
+ for (BottomOffset i = {0}; i < stack->AboveTop(); ++i) {
+ DefinitionLocation def = destination_definitions.Peek(i);
+ if (def.IsPhiFromBlock(destination)) {
+ out() << indentation << DefinitionToVariable(def) << " = "
+ << stack->Peek(i) << ";\n";
+ }
+ }
+ out() << indentation << "goto " << BlockName(destination) << ";\n";
+}
+
+void CCGenerator::EmitInstruction(const GotoInstruction& instruction,
+ Stack<std::string>* stack) {
+ EmitGoto(instruction.destination, stack, " ");
+}
+
+void CCGenerator::EmitInstruction(const GotoExternalInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: GotoExternal");
+}
+
+void CCGenerator::EmitInstruction(const ReturnInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: Return");
+}
+
+void CCGenerator::EmitInstruction(
+ const PrintConstantStringInstruction& instruction,
+ Stack<std::string>* stack) {
+ out() << " std::cout << " << StringLiteralQuote(instruction.message)
+ << ";\n";
+}
+
+void CCGenerator::EmitInstruction(const AbortInstruction& instruction,
+ Stack<std::string>* stack) {
+ switch (instruction.kind) {
+ case AbortInstruction::Kind::kUnreachable:
+ DCHECK(instruction.message.empty());
+ out() << " UNREACHABLE();\n";
+ break;
+ case AbortInstruction::Kind::kDebugBreak:
+ DCHECK(instruction.message.empty());
+ out() << " base::OS::DebugBreak();\n";
+ break;
+ case AbortInstruction::Kind::kAssertionFailure: {
+ std::string file = StringLiteralQuote(
+ SourceFileMap::PathFromV8Root(instruction.pos.source));
+ out() << " CHECK(false, \"Failed Torque assertion: '\""
+ << StringLiteralQuote(instruction.message) << "\"' at \"" << file
+ << "\":\""
+ << StringLiteralQuote(
+ std::to_string(instruction.pos.start.line + 1))
+ << ");\n";
+ break;
+ }
+ }
+}
+
+void CCGenerator::EmitInstruction(const UnsafeCastInstruction& instruction,
+ Stack<std::string>* stack) {
+ const std::string str = "static_cast<" +
+ instruction.destination_type->GetRuntimeType() +
+ ">(" + stack->Top() + ")";
+ stack->Poke(stack->AboveTop() - 1, str);
+ SetDefinitionVariable(instruction.GetValueDefinition(), str);
+}
+
+void CCGenerator::EmitInstruction(const LoadReferenceInstruction& instruction,
+ Stack<std::string>* stack) {
+ std::string result_name =
+ DefinitionToVariable(instruction.GetValueDefinition());
+
+ std::string offset = stack->Pop();
+ std::string object = stack->Pop();
+ stack->Push(result_name);
+
+ std::string result_type = instruction.type->GetRuntimeType();
+ decls() << " " << result_type << " " << result_name << "{}; USE("
+ << result_name << ");\n";
+ out() << " " << result_name << " = ";
+ if (instruction.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ out() << "TaggedField<" << result_type << ">::load(isolate, " << object
+ << ", static_cast<int>(" << offset << "));\n";
+ } else {
+ out() << "(" << object << ").ReadField<" << result_type << ">(" << offset
+ << ");\n";
+ }
+}
+
+void CCGenerator::EmitInstruction(const StoreReferenceInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: StoreReference");
+}
+
+namespace {
+std::string GetBitFieldSpecialization(const Type* container,
+ const BitField& field) {
+ std::stringstream stream;
+ stream << "base::BitField<"
+ << field.name_and_type.type->GetConstexprGeneratedTypeName() << ", "
+ << field.offset << ", " << field.num_bits << ", "
+ << container->GetConstexprGeneratedTypeName() << ">";
+ return stream.str();
+}
+} // namespace
+
+void CCGenerator::EmitInstruction(const LoadBitFieldInstruction& instruction,
+ Stack<std::string>* stack) {
+ std::string result_name =
+ DefinitionToVariable(instruction.GetValueDefinition());
+
+ std::string bit_field_struct = stack->Pop();
+ stack->Push(result_name);
+
+ const Type* struct_type = instruction.bit_field_struct_type;
+
+ decls() << " " << instruction.bit_field.name_and_type.type->GetRuntimeType()
+ << " " << result_name << "{}; USE(" << result_name << ");\n";
+
+ base::Optional<const Type*> smi_tagged_type =
+ Type::MatchUnaryGeneric(struct_type, TypeOracle::GetSmiTaggedGeneric());
+ if (smi_tagged_type) {
+ // Get the untagged value and its type.
+ bit_field_struct = bit_field_struct + ".value()";
+ struct_type = *smi_tagged_type;
+ }
+
+ out() << " " << result_name << " = "
+ << GetBitFieldSpecialization(struct_type, instruction.bit_field)
+ << "::decode(" << bit_field_struct << ");\n";
+}
+
+void CCGenerator::EmitInstruction(const StoreBitFieldInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: StoreBitField");
+}
+
+// static
+void CCGenerator::EmitCCValue(VisitResult result,
+ const Stack<std::string>& values,
+ std::ostream& out) {
+ if (!result.IsOnStack()) {
+ out << result.constexpr_value();
+ } else if (auto struct_type = result.type()->StructSupertype()) {
+ out << "std::tuple_cat(";
+ bool first = true;
+ for (auto& field : (*struct_type)->fields()) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+ if (!field.name_and_type.type->IsStructType()) {
+ out << "std::make_tuple(";
+ }
+ EmitCCValue(ProjectStructField(result, field.name_and_type.name), values,
+ out);
+ if (!field.name_and_type.type->IsStructType()) {
+ out << ")";
+ }
+ }
+ out << ")";
+ } else {
+ DCHECK_EQ(1, result.stack_range().Size());
+ out << values.Peek(result.stack_range().begin());
+ }
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
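To make the buffering and phi handling above concrete: for a simple two-block graph, the emitted C++ has roughly the shape sketched below. Variable and label names follow DefinitionToVariable and BlockName, but this is an illustration, not captured generator output.

// Sketch of generated runtime-macro body (illustrative only):
//   int32_t phi_bb1_0{}; USE(phi_bb1_0);   // phi pre-declared by EmitGraph
//   uintptr_t tmp0{}; USE(tmp0);           // value declared via decls()
//   tmp0 = /* instruction output */;       // buffered non-declaration output
//   phi_bb1_0 = tmp0;                      // EmitGoto copies into the phi
//   goto block1;
//
//   block1:
//   /* ... reads phi_bb1_0 ... */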
diff --git a/deps/v8/src/torque/cc-generator.h b/deps/v8/src/torque/cc-generator.h
new file mode 100644
index 0000000000..5626f3f7fa
--- /dev/null
+++ b/deps/v8/src/torque/cc-generator.h
@@ -0,0 +1,46 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_CC_GENERATOR_H_
+#define V8_TORQUE_CC_GENERATOR_H_
+
+#include "src/torque/torque-code-generator.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+class CCGenerator : public TorqueCodeGenerator {
+ public:
+ CCGenerator(const ControlFlowGraph& cfg, std::ostream& out)
+ : TorqueCodeGenerator(cfg, out) {}
+ base::Optional<Stack<std::string>> EmitGraph(Stack<std::string> parameters);
+
+ static void EmitCCValue(VisitResult result, const Stack<std::string>& values,
+ std::ostream& out);
+
+ private:
+ void EmitSourcePosition(SourcePosition pos,
+ bool always_emit = false) override;
+
+ void EmitGoto(const Block* destination, Stack<std::string>* stack,
+ std::string indentation);
+
+ std::vector<std::string> ProcessArgumentsCommon(
+ const TypeVector& parameter_types,
+ std::vector<std::string> constexpr_arguments, Stack<std::string>* stack);
+
+ Stack<std::string> EmitBlock(const Block* block);
+#define EMIT_INSTRUCTION_DECLARATION(T) \
+ void EmitInstruction(const T& instruction, Stack<std::string>* stack) \
+ override;
+ TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
+#undef EMIT_INSTRUCTION_DECLARATION
+};
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_CC_GENERATOR_H_
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index 8cb3a64c30..54c54c5819 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -76,6 +76,7 @@ static const char* const UNINITIALIZED_ITERATOR_TYPE_STRING =
static const char* const GENERIC_TYPE_INSTANTIATION_NAMESPACE_STRING =
"_generic_type_instantiation_namespace";
static const char* const FIXED_ARRAY_BASE_TYPE_STRING = "FixedArrayBase";
+static const char* const WEAK_HEAP_OBJECT = "WeakHeapObject";
static const char* const STATIC_ASSERT_MACRO_STRING = "StaticAssert";
static const char* const ANNOTATION_GENERATE_PRINT = "@generatePrint";
@@ -84,6 +85,8 @@ static const char* const ANNOTATION_ABSTRACT = "@abstract";
static const char* const ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT =
"@hasSameInstanceTypeAsParent";
static const char* const ANNOTATION_GENERATE_CPP_CLASS = "@generateCppClass";
+static const char* const ANNOTATION_CUSTOM_MAP = "@customMap";
+static const char* const ANNOTATION_CUSTOM_CPP_CLASS = "@customCppClass";
static const char* const ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT =
"@highestInstanceTypeWithinParentClassRange";
static const char* const ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT =
@@ -96,8 +99,10 @@ static const char* const ANNOTATION_IF = "@if";
static const char* const ANNOTATION_IFNOT = "@ifnot";
static const char* const ANNOTATION_GENERATE_BODY_DESCRIPTOR =
"@generateBodyDescriptor";
-static const char* const ANNOTATION_EXPORT_CPP_CLASS = "@export";
+static const char* const ANNOTATION_EXPORT = "@export";
static const char* const ANNOTATION_DO_NOT_GENERATE_CAST = "@doNotGenerateCast";
+static const char* const ANNOTATION_USE_PARENT_TYPE_CHECKER =
+ "@useParentTypeChecker";
inline bool IsConstexprName(const std::string& name) {
return name.substr(0, std::strlen(CONSTEXPR_TYPE_PREFIX)) ==
@@ -117,7 +122,8 @@ inline std::string GetConstexprName(const std::string& name) {
enum class AbstractTypeFlag {
kNone = 0,
kTransient = 1 << 0,
- kConstexpr = 1 << 1
+ kConstexpr = 1 << 1,
+ kUseParentTypeChecker = 1 << 2,
};
using AbstractTypeFlags = base::Flags<AbstractTypeFlag>;
@@ -131,12 +137,14 @@ enum class ClassFlag {
kIsShape = 1 << 5,
kHasSameInstanceTypeAsParent = 1 << 6,
kGenerateCppClassDefinitions = 1 << 7,
+ kCustomCppClass = 1 << 8,
kHighestInstanceTypeWithinParent = 1 << 9,
kLowestInstanceTypeWithinParent = 1 << 10,
kUndefinedLayout = 1 << 11,
kGenerateBodyDescriptor = 1 << 12,
kExport = 1 << 13,
- kDoNotGenerateCast = 1 << 14
+ kDoNotGenerateCast = 1 << 14,
+ kCustomMap = 1 << 15,
};
using ClassFlags = base::Flags<ClassFlag>;
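The new kUseParentTypeChecker, kCustomCppClass, and kCustomMap bits slot into the existing base::Flags pattern; a small sketch of how such a flag set is combined and queried, matching the way the patched AbstractTypeDeclaration tests its flags (illustrative only):

AbstractTypeFlags flags(AbstractTypeFlag::kTransient);
flags |= AbstractTypeFlag::kUseParentTypeChecker;
// Tested the same way IsTransient()/IsConstexpr() do above.
bool transient = flags & AbstractTypeFlag::kTransient;
bool parent_checker = flags & AbstractTypeFlag::kUseParentTypeChecker;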
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index da16a1b3b4..93e8d47df4 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -83,7 +83,7 @@ Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
out() << " ca_.Bind(&" << BlockName(block) << phi_names.str() << ");\n";
for (const Instruction& instruction : block->instructions()) {
- EmitInstruction(instruction, &stack);
+ TorqueCodeGenerator::EmitInstruction(instruction, &stack);
}
return stack;
}
@@ -99,53 +99,6 @@ void CSAGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
}
}
-bool CSAGenerator::IsEmptyInstruction(const Instruction& instruction) {
- switch (instruction.kind()) {
- case InstructionKind::kPeekInstruction:
- case InstructionKind::kPokeInstruction:
- case InstructionKind::kDeleteRangeInstruction:
- case InstructionKind::kPushUninitializedInstruction:
- case InstructionKind::kPushBuiltinPointerInstruction:
- case InstructionKind::kUnsafeCastInstruction:
- return true;
- default:
- return false;
- }
-}
-
-void CSAGenerator::EmitInstruction(const Instruction& instruction,
- Stack<std::string>* stack) {
-#ifdef DEBUG
- if (!IsEmptyInstruction(instruction)) {
- EmitSourcePosition(instruction->pos);
- }
-#endif
-
- switch (instruction.kind()) {
-#define ENUM_ITEM(T) \
- case InstructionKind::k##T: \
- return EmitInstruction(instruction.Cast<T>(), stack);
- TORQUE_INSTRUCTION_LIST(ENUM_ITEM)
-#undef ENUM_ITEM
- }
-}
-
-void CSAGenerator::EmitInstruction(const PeekInstruction& instruction,
- Stack<std::string>* stack) {
- stack->Push(stack->Peek(instruction.slot));
-}
-
-void CSAGenerator::EmitInstruction(const PokeInstruction& instruction,
- Stack<std::string>* stack) {
- stack->Poke(instruction.slot, stack->Top());
- stack->Pop();
-}
-
-void CSAGenerator::EmitInstruction(const DeleteRangeInstruction& instruction,
- Stack<std::string>* stack) {
- stack->DeleteRange(instruction.range);
-}
-
void CSAGenerator::EmitInstruction(
const PushUninitializedInstruction& instruction,
Stack<std::string>* stack) {
@@ -198,35 +151,35 @@ void CSAGenerator::EmitInstruction(
}
}
-void CSAGenerator::ProcessArgumentsCommon(
- const TypeVector& parameter_types, std::vector<std::string>* args,
- std::vector<std::string>* constexpr_arguments, Stack<std::string>* stack) {
+std::vector<std::string> CSAGenerator::ProcessArgumentsCommon(
+ const TypeVector& parameter_types,
+ std::vector<std::string> constexpr_arguments, Stack<std::string>* stack) {
+ std::vector<std::string> args;
for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
const Type* type = *it;
VisitResult arg;
if (type->IsConstexpr()) {
- args->push_back(std::move(constexpr_arguments->back()));
- constexpr_arguments->pop_back();
+ args.push_back(std::move(constexpr_arguments.back()));
+ constexpr_arguments.pop_back();
} else {
std::stringstream s;
size_t slot_count = LoweredSlotCount(type);
VisitResult arg = VisitResult(type, stack->TopRange(slot_count));
EmitCSAValue(arg, *stack, s);
- args->push_back(s.str());
+ args.push_back(s.str());
stack->PopMany(slot_count);
}
}
- std::reverse(args->begin(), args->end());
+ std::reverse(args.begin(), args.end());
+ return args;
}
void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
Stack<std::string>* stack) {
- std::vector<std::string> constexpr_arguments =
- instruction.constexpr_arguments;
- std::vector<std::string> args;
TypeVector parameter_types =
instruction.intrinsic->signature().parameter_types.types;
- ProcessArgumentsCommon(parameter_types, &args, &constexpr_arguments, stack);
+ std::vector<std::string> args = ProcessArgumentsCommon(
+ parameter_types, instruction.constexpr_arguments, stack);
Stack<std::string> pre_call_stack = *stack;
const Type* return_type = instruction.intrinsic->signature().return_type;
@@ -355,12 +308,10 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
Stack<std::string>* stack) {
- std::vector<std::string> constexpr_arguments =
- instruction.constexpr_arguments;
- std::vector<std::string> args;
TypeVector parameter_types =
instruction.macro->signature().parameter_types.types;
- ProcessArgumentsCommon(parameter_types, &args, &constexpr_arguments, stack);
+ std::vector<std::string> args = ProcessArgumentsCommon(
+ parameter_types, instruction.constexpr_arguments, stack);
Stack<std::string> pre_call_stack = *stack;
const Type* return_type = instruction.macro->signature().return_type;
@@ -409,12 +360,10 @@ void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
void CSAGenerator::EmitInstruction(
const CallCsaMacroAndBranchInstruction& instruction,
Stack<std::string>* stack) {
- std::vector<std::string> constexpr_arguments =
- instruction.constexpr_arguments;
- std::vector<std::string> args;
TypeVector parameter_types =
instruction.macro->signature().parameter_types.types;
- ProcessArgumentsCommon(parameter_types, &args, &constexpr_arguments, stack);
+ std::vector<std::string> args = ProcessArgumentsCommon(
+ parameter_types, instruction.constexpr_arguments, stack);
Stack<std::string> pre_call_stack = *stack;
std::vector<std::string> results;
diff --git a/deps/v8/src/torque/csa-generator.h b/deps/v8/src/torque/csa-generator.h
index 83c4ec410a..c2400609d4 100644
--- a/deps/v8/src/torque/csa-generator.h
+++ b/deps/v8/src/torque/csa-generator.h
@@ -5,24 +5,17 @@
#ifndef V8_TORQUE_CSA_GENERATOR_H_
#define V8_TORQUE_CSA_GENERATOR_H_
-#include <iostream>
-
-#include "src/torque/cfg.h"
-#include "src/torque/declarable.h"
+#include "src/torque/torque-code-generator.h"
namespace v8 {
namespace internal {
namespace torque {
-class CSAGenerator {
+class CSAGenerator : public TorqueCodeGenerator {
public:
CSAGenerator(const ControlFlowGraph& cfg, std::ostream& out,
base::Optional<Builtin::Kind> linkage = base::nullopt)
- : cfg_(cfg),
- out_(&out),
- out_decls_(&out),
- linkage_(linkage),
- previous_position_(SourcePosition::Invalid()) {}
+ : TorqueCodeGenerator(cfg, out), linkage_(linkage) {}
base::Optional<Stack<std::string>> EmitGraph(Stack<std::string> parameters);
static constexpr const char* ARGUMENTS_VARIABLE_STRING = "arguments";
@@ -31,46 +24,10 @@ class CSAGenerator {
std::ostream& out);
private:
- const ControlFlowGraph& cfg_;
- std::ostream* out_;
- std::ostream* out_decls_;
- size_t fresh_id_ = 0;
base::Optional<Builtin::Kind> linkage_;
- SourcePosition previous_position_;
- std::map<DefinitionLocation, std::string> location_map_;
-
- std::string DefinitionToVariable(const DefinitionLocation& location) {
- if (location.IsPhi()) {
- std::stringstream stream;
- stream << "phi_bb" << location.GetPhiBlock()->id() << "_"
- << location.GetPhiIndex();
- return stream.str();
- } else if (location.IsParameter()) {
- auto it = location_map_.find(location);
- DCHECK_NE(it, location_map_.end());
- return it->second;
- } else {
- DCHECK(location.IsInstruction());
- auto it = location_map_.find(location);
- if (it == location_map_.end()) {
- it = location_map_.insert(std::make_pair(location, FreshNodeName()))
- .first;
- }
- return it->second;
- }
- }
-
- void SetDefinitionVariable(const DefinitionLocation& definition,
- const std::string& str) {
- DCHECK_EQ(location_map_.find(definition), location_map_.end());
- location_map_.insert(std::make_pair(definition, str));
- }
- std::ostream& out() { return *out_; }
- std::ostream& decls() { return *out_decls_; }
-
- bool IsEmptyInstruction(const Instruction& instruction);
- void EmitSourcePosition(SourcePosition pos, bool always_emit = false);
+ void EmitSourcePosition(SourcePosition pos,
+ bool always_emit = false) override;
std::string PreCallableExceptionPreparation(
base::Optional<Block*> catch_block);
@@ -79,24 +36,15 @@ class CSAGenerator {
base::Optional<Block*> catch_block, Stack<std::string>* stack,
const base::Optional<DefinitionLocation>& exception_object_definition);
- std::string FreshNodeName() { return "tmp" + std::to_string(fresh_id_++); }
- std::string FreshCatchName() { return "catch" + std::to_string(fresh_id_++); }
- std::string FreshLabelName() { return "label" + std::to_string(fresh_id_++); }
- std::string BlockName(const Block* block) {
- return "block" + std::to_string(block->id());
- }
-
- void ProcessArgumentsCommon(const TypeVector& parameter_types,
- std::vector<std::string>* args,
- std::vector<std::string>* constexpr_arguments,
- Stack<std::string>* stack);
+ std::vector<std::string> ProcessArgumentsCommon(
+ const TypeVector& parameter_types,
+ std::vector<std::string> constexpr_arguments, Stack<std::string>* stack);
Stack<std::string> EmitBlock(const Block* block);
- void EmitInstruction(const Instruction& instruction,
- Stack<std::string>* stack);
-#define EMIT_INSTRUCTION_DECLARATION(T) \
- void EmitInstruction(const T& instruction, Stack<std::string>* stack);
- TORQUE_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
+#define EMIT_INSTRUCTION_DECLARATION(T) \
+ void EmitInstruction(const T& instruction, Stack<std::string>* stack) \
+ override;
+ TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
#undef EMIT_INSTRUCTION_DECLARATION
};
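For readers unfamiliar with the X-macro above: TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST invokes EMIT_INSTRUCTION_DECLARATION once per instruction kind, so the block expands to one override per backend-dependent instruction, for example:

void EmitInstruction(const BranchInstruction& instruction,
                     Stack<std::string>* stack) override;
void EmitInstruction(const CallCsaMacroInstruction& instruction,
                     Stack<std::string>* stack) override;
// ... one declaration per entry in the list.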
diff --git a/deps/v8/src/torque/declarable.h b/deps/v8/src/torque/declarable.h
index 3580d9b6dd..27edf79636 100644
--- a/deps/v8/src/torque/declarable.h
+++ b/deps/v8/src/torque/declarable.h
@@ -291,6 +291,11 @@ class ExternConstant : public Value {
}
};
+enum class OutputType {
+ kCSA,
+ kCC,
+};
+
class Callable : public Scope {
public:
DECLARE_DECLARABLE_BOILERPLATE(Callable, callable)
@@ -308,8 +313,26 @@ class Callable : public Scope {
bool HasReturns() const { return returns_; }
base::Optional<Statement*> body() const { return body_; }
bool IsExternal() const { return !body_.has_value(); }
- virtual bool ShouldBeInlined() const { return false; }
- virtual bool ShouldGenerateExternalCode() const { return !ShouldBeInlined(); }
+ virtual bool ShouldBeInlined(OutputType output_type) const {
+ // C++ output doesn't support exiting to labels, so functions with labels in
+ // the signature must be inlined.
+ return output_type == OutputType::kCC && !signature().labels.empty();
+ }
+ bool ShouldGenerateExternalCode(OutputType output_type) const {
+ return !ShouldBeInlined(output_type);
+ }
+
+ static std::string PrefixNameForCCOutput(const std::string& name) {
+ // If a Torque macro requires a C++ runtime function to be generated, then
+ // the generated function begins with this prefix to avoid any naming
+ // collisions with the generated CSA function for the same macro.
+ return "TqRuntime" + name;
+ }
+
+ // Name to use in runtime C++ code.
+ virtual std::string CCName() const {
+ return PrefixNameForCCOutput(ExternalName());
+ }
protected:
Callable(Declarable::Kind kind, std::string external_name,
@@ -336,7 +359,7 @@ class Callable : public Scope {
class Macro : public Callable {
public:
DECLARE_DECLARABLE_BOILERPLATE(Macro, macro)
- bool ShouldBeInlined() const override {
+ bool ShouldBeInlined(OutputType output_type) const override {
for (const LabelDeclaration& label : signature().labels) {
for (const Type* type : label.types) {
if (type->StructSupertype()) return true;
@@ -345,7 +368,7 @@ class Macro : public Callable {
// Intrinsics that are used internally in Torque and implemented as torque
// code should be inlined and not generate C++ definitions.
if (ReadableName()[0] == '%') return true;
- return Callable::ShouldBeInlined();
+ return Callable::ShouldBeInlined(output_type);
}
void SetUsed() { used_ = true; }
@@ -375,6 +398,11 @@ class ExternMacro : public Macro {
return external_assembler_name_;
}
+ std::string CCName() const override {
+ return "TorqueRuntimeMacroShims::" + external_assembler_name() +
+ "::" + ExternalName();
+ }
+
private:
friend class Declarations;
ExternMacro(const std::string& name, std::string external_assembler_name,
@@ -390,6 +418,12 @@ class TorqueMacro : public Macro {
public:
DECLARE_DECLARABLE_BOILERPLATE(TorqueMacro, TorqueMacro)
bool IsExportedToCSA() const { return exported_to_csa_; }
+ std::string CCName() const override {
+ // Exported functions must have unique and C++-friendly readable names, so
+ // prefer those wherever possible.
+ return PrefixNameForCCOutput(IsExportedToCSA() ? ReadableName()
+ : ExternalName());
+ }
protected:
TorqueMacro(Declarable::Kind kind, std::string external_name,
@@ -417,8 +451,8 @@ class TorqueMacro : public Macro {
class Method : public TorqueMacro {
public:
DECLARE_DECLARABLE_BOILERPLATE(Method, Method)
- bool ShouldBeInlined() const override {
- return Macro::ShouldBeInlined() ||
+ bool ShouldBeInlined(OutputType output_type) const override {
+ return Macro::ShouldBeInlined(output_type) ||
signature()
.parameter_types.types[signature().implicit_count]
->IsStructType();
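The effect of the naming hooks above, shown with a hypothetical macro name ("LoadFoo" is not from the patch):

// CSA output keeps the external name:        LoadFoo(...)
// C++ runtime output gets the prefix:        TqRuntimeLoadFoo(...)
//   via Callable::PrefixNameForCCOutput("LoadFoo")
// ExternMacro instead routes through shims:
//   TorqueRuntimeMacroShims::<external assembler name>::LoadFoo(...)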
diff --git a/deps/v8/src/torque/declarations.cc b/deps/v8/src/torque/declarations.cc
index 0a1d45a510..1e1c89da86 100644
--- a/deps/v8/src/torque/declarations.cc
+++ b/deps/v8/src/torque/declarations.cc
@@ -214,6 +214,7 @@ Macro* Declarations::DeclareMacro(
macro = CreateTorqueMacro(name, name, accessible_from_csa, signature, body,
is_user_defined);
}
+
Declare(name, macro);
if (op) {
if (TryLookupMacro(*op, signature.GetExplicitTypes())) {
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index 6182762a6a..7ccbc851c6 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -62,6 +62,9 @@ class GlobalContext : public ContextualClass<GlobalContext> {
struct PerFileStreams {
std::stringstream csa_headerfile;
std::stringstream csa_ccfile;
+ std::stringstream class_definition_headerfile;
+ std::stringstream class_definition_inline_headerfile;
+ std::stringstream class_definition_ccfile;
};
static PerFileStreams& GeneratedPerFile(SourceId file) {
return Get().generated_per_file_[file];
@@ -74,6 +77,15 @@ class GlobalContext : public ContextualClass<GlobalContext> {
static bool IsInstanceTypesInitialized() {
return Get().instance_types_initialized_;
}
+ static void EnsureInCCOutputList(TorqueMacro* macro) {
+ GlobalContext& c = Get();
+ if (c.macros_for_cc_output_set_.insert(macro).second) {
+ c.macros_for_cc_output_.push_back(macro);
+ }
+ }
+ static const std::vector<TorqueMacro*>& AllMacrosForCCOutput() {
+ return Get().macros_for_cc_output_;
+ }
private:
bool collect_language_server_data_;
@@ -84,6 +96,8 @@ class GlobalContext : public ContextualClass<GlobalContext> {
std::set<std::string> cpp_includes_;
std::map<SourceId, PerFileStreams> generated_per_file_;
std::map<std::string, size_t> fresh_ids_;
+ std::vector<TorqueMacro*> macros_for_cc_output_;
+ std::unordered_set<TorqueMacro*> macros_for_cc_output_set_;
bool instance_types_initialized_ = false;
friend class LanguageServerData;
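EnsureInCCOutputList pairs an unordered_set with a vector so each macro is recorded once while keeping a deterministic first-insertion order for output. The same pattern in isolation (generic sketch, not V8 code):

#include <unordered_set>
#include <vector>

template <typename T>
class OrderedSet {
 public:
  void Insert(T value) {
    // The set deduplicates; the vector preserves first-insertion order.
    if (seen_.insert(value).second) ordered_.push_back(value);
  }
  const std::vector<T>& ordered() const { return ordered_; }

 private:
  std::unordered_set<T> seen_;
  std::vector<T> ordered_;
};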
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index 2f2881fd07..00504b5eff 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -10,6 +10,7 @@
#include "src/base/optional.h"
#include "src/common/globals.h"
+#include "src/torque/cc-generator.h"
#include "src/torque/constants.h"
#include "src/torque/csa-generator.h"
#include "src/torque/declaration-visitor.h"
@@ -56,58 +57,136 @@ const Type* ImplementationVisitor::Visit(Statement* stmt) {
return result;
}
-void ImplementationVisitor::BeginCSAFiles() {
+void ImplementationVisitor::BeginGeneratedFiles() {
+ std::set<SourceId> contains_class_definitions;
+ for (const ClassType* type : TypeOracle::GetClasses()) {
+ if (type->GenerateCppClassDefinitions()) {
+ contains_class_definitions.insert(type->AttributedToFile());
+ }
+ }
+
for (SourceId file : SourceFileMap::AllSources()) {
- std::ostream& source = GlobalContext::GeneratedPerFile(file).csa_ccfile;
- std::ostream& header = GlobalContext::GeneratedPerFile(file).csa_headerfile;
+ // Output beginning of CSA .cc file.
+ {
+ std::ostream& out = GlobalContext::GeneratedPerFile(file).csa_ccfile;
- for (const std::string& include_path : GlobalContext::CppIncludes()) {
- source << "#include " << StringLiteralQuote(include_path) << "\n";
- }
+ for (const std::string& include_path : GlobalContext::CppIncludes()) {
+ out << "#include " << StringLiteralQuote(include_path) << "\n";
+ }
- for (SourceId file : SourceFileMap::AllSources()) {
- source << "#include \"torque-generated/" +
- SourceFileMap::PathFromV8RootWithoutExtension(file) +
- "-tq-csa.h\"\n";
- }
- source << "\n";
-
- source << "namespace v8 {\n"
- << "namespace internal {\n"
- << "\n";
-
- std::string headerDefine =
- "V8_GEN_TORQUE_GENERATED_" +
- UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";
- header << "#ifndef " << headerDefine << "\n";
- header << "#define " << headerDefine << "\n\n";
- header << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
- header << "\n";
+ for (SourceId file : SourceFileMap::AllSources()) {
+ out << "#include \"torque-generated/" +
+ SourceFileMap::PathFromV8RootWithoutExtension(file) +
+ "-tq-csa.h\"\n";
+ }
+ out << "\n";
+
+ out << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n";
+ }
+ // Output beginning of CSA .h file.
+ {
+ std::ostream& out = GlobalContext::GeneratedPerFile(file).csa_headerfile;
+ std::string headerDefine =
+ "V8_GEN_TORQUE_GENERATED_" +
+ UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";
+ out << "#ifndef " << headerDefine << "\n";
+ out << "#define " << headerDefine << "\n\n";
+ out << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
+ out << "\n";
+
+ out << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n";
+ }
+ // Output beginning of class definition .cc file.
+ {
+ auto& streams = GlobalContext::GeneratedPerFile(file);
+ std::ostream& out = streams.class_definition_ccfile;
+ if (contains_class_definitions.count(file) != 0) {
+ out << "#include \""
+ << SourceFileMap::PathFromV8RootWithoutExtension(file)
+ << "-inl.h\"\n\n";
+ out << "#include \"torque-generated/class-verifiers.h\"\n";
+ out << "#include \"src/objects/instance-type-inl.h\"\n\n";
+ }
- header << "namespace v8 {\n"
- << "namespace internal {\n"
- << "\n";
+ out << "namespace v8 {\n";
+ out << "namespace internal {\n";
+ }
}
}
-void ImplementationVisitor::EndCSAFiles() {
+void ImplementationVisitor::EndGeneratedFiles() {
for (SourceId file : SourceFileMap::AllSources()) {
- std::ostream& source = GlobalContext::GeneratedPerFile(file).csa_ccfile;
- std::ostream& header = GlobalContext::GeneratedPerFile(file).csa_headerfile;
+ {
+ std::ostream& out = GlobalContext::GeneratedPerFile(file).csa_ccfile;
+
+ out << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "\n";
+ }
+ {
+ std::ostream& out = GlobalContext::GeneratedPerFile(file).csa_headerfile;
- std::string headerDefine =
- "V8_GEN_TORQUE_GENERATED_" +
- UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";
+ std::string headerDefine =
+ "V8_GEN_TORQUE_GENERATED_" +
+ UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";
+
+ out << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "\n";
+ out << "#endif // " << headerDefine << "\n";
+ }
+ {
+ std::ostream& out =
+ GlobalContext::GeneratedPerFile(file).class_definition_ccfile;
+
+ out << "} // namespace v8\n";
+ out << "} // namespace internal\n";
+ }
+ }
+}
- source << "} // namespace internal\n"
- << "} // namespace v8\n"
- << "\n";
+void ImplementationVisitor::BeginRuntimeMacrosFile() {
+ std::ostream& source = runtime_macros_cc_;
+ std::ostream& header = runtime_macros_h_;
- header << "} // namespace internal\n"
- << "} // namespace v8\n"
- << "\n";
- header << "#endif // " << headerDefine << "\n";
+ source << "#include \"torque-generated/runtime-macros.h\"\n\n";
+ source << "#include \"src/torque/runtime-macro-shims.h\"\n";
+ for (const std::string& include_path : GlobalContext::CppIncludes()) {
+ source << "#include " << StringLiteralQuote(include_path) << "\n";
}
+ source << "\n";
+
+ source << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n";
+
+ const char* kHeaderDefine = "V8_GEN_TORQUE_GENERATED_RUNTIME_MACROS_H_";
+ header << "#ifndef " << kHeaderDefine << "\n";
+ header << "#define " << kHeaderDefine << "\n\n";
+ header << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
+ header << "\n";
+
+ header << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n";
+}
+
+void ImplementationVisitor::EndRuntimeMacrosFile() {
+ std::ostream& source = runtime_macros_cc_;
+ std::ostream& header = runtime_macros_h_;
+
+ source << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "\n";
+
+ header << "\n} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "\n";
+ header << "#endif // V8_GEN_TORQUE_GENERATED_RUNTIME_MACROS_H_\n";
}
void ImplementationVisitor::Visit(NamespaceConstant* decl) {
@@ -116,15 +195,15 @@ void ImplementationVisitor::Visit(NamespaceConstant* decl) {
BindingsManagersScope bindings_managers_scope;
- header_out() << " ";
- GenerateFunctionDeclaration(header_out(), "", decl->external_name(),
+ csa_headerfile() << " ";
+ GenerateFunctionDeclaration(csa_headerfile(), "", decl->external_name(),
signature, {});
- header_out() << ";\n";
+ csa_headerfile() << ";\n";
- GenerateFunctionDeclaration(source_out(), "", decl->external_name(),
+ GenerateFunctionDeclaration(csa_ccfile(), "", decl->external_name(),
signature, {});
- source_out() << " {\n";
- source_out() << " compiler::CodeAssembler ca_(state_);\n";
+ csa_ccfile() << " {\n";
+ csa_ccfile() << " compiler::CodeAssembler ca_(state_);\n";
DCHECK(!signature.return_type->IsVoidOrNever());
@@ -134,15 +213,15 @@ void ImplementationVisitor::Visit(NamespaceConstant* decl) {
VisitResult return_result =
GenerateImplicitConvert(signature.return_type, expression_result);
- CSAGenerator csa_generator{assembler().Result(), source_out()};
+ CSAGenerator csa_generator{assembler().Result(), csa_ccfile()};
Stack<std::string> values = *csa_generator.EmitGraph(Stack<std::string>{});
assembler_ = base::nullopt;
- source_out() << " return ";
- CSAGenerator::EmitCSAValue(return_result, values, source_out());
- source_out() << ";\n";
- source_out() << "}\n\n";
+ csa_ccfile() << " return ";
+ CSAGenerator::EmitCSAValue(return_result, values, csa_ccfile());
+ csa_ccfile() << ";\n";
+ csa_ccfile() << "}\n\n";
}
void ImplementationVisitor::Visit(TypeAlias* alias) {
@@ -274,14 +353,21 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
bool has_return_value =
can_return && return_type != TypeOracle::GetVoidType();
- GenerateMacroFunctionDeclaration(header_out(), "", macro);
- header_out() << ";\n";
+ GenerateMacroFunctionDeclaration(csa_headerfile(), macro);
+ csa_headerfile() << ";\n";
- GenerateMacroFunctionDeclaration(source_out(), "", macro);
- source_out() << " {\n";
- source_out() << " compiler::CodeAssembler ca_(state_);\n";
- source_out()
- << " compiler::CodeAssembler::SourcePositionScope pos_scope(&ca_);\n";
+ GenerateMacroFunctionDeclaration(csa_ccfile(), macro);
+ csa_ccfile() << " {\n";
+
+ if (output_type_ == OutputType::kCC) {
+ // For now, generated C++ is only for field offset computations. If we ever
+ // generate C++ code that can allocate, then it should be handlified.
+ csa_ccfile() << " DisallowHeapAllocation no_gc;\n";
+ } else {
+ csa_ccfile() << " compiler::CodeAssembler ca_(state_);\n";
+ csa_ccfile()
+ << " compiler::CodeAssembler::SourcePositionScope pos_scope(&ca_);\n";
+ }
Stack<std::string> lowered_parameters;
Stack<const Type*> lowered_parameter_types;
@@ -363,18 +449,27 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
assembler().Bind(end);
}
- CSAGenerator csa_generator{assembler().Result(), source_out()};
- base::Optional<Stack<std::string>> values =
- csa_generator.EmitGraph(lowered_parameters);
+ base::Optional<Stack<std::string>> values;
+ if (output_type_ == OutputType::kCC) {
+ CCGenerator cc_generator{assembler().Result(), csa_ccfile()};
+ values = cc_generator.EmitGraph(lowered_parameters);
+ } else {
+ CSAGenerator csa_generator{assembler().Result(), csa_ccfile()};
+ values = csa_generator.EmitGraph(lowered_parameters);
+ }
assembler_ = base::nullopt;
if (has_return_value) {
- source_out() << " return ";
- CSAGenerator::EmitCSAValue(return_value, *values, source_out());
- source_out() << ";\n";
+ csa_ccfile() << " return ";
+ if (output_type_ == OutputType::kCC) {
+ CCGenerator::EmitCCValue(return_value, *values, csa_ccfile());
+ } else {
+ CSAGenerator::EmitCSAValue(return_value, *values, csa_ccfile());
+ }
+ csa_ccfile() << ";\n";
}
- source_out() << "}\n\n";
+ csa_ccfile() << "}\n\n";
}
void ImplementationVisitor::Visit(TorqueMacro* macro) {
@@ -416,7 +511,7 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
const std::string& name = builtin->ExternalName();
const Signature& signature = builtin->signature();
- source_out() << "TF_BUILTIN(" << name << ", CodeStubAssembler) {\n"
+ csa_ccfile() << "TF_BUILTIN(" << name << ", CodeStubAssembler) {\n"
<< " compiler::CodeAssemblerState* state_ = state();"
<< " compiler::CodeAssembler ca_(state());\n";
@@ -435,17 +530,17 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
.Position(signature.parameter_names[signature.implicit_count]->pos);
}
- source_out()
- << " Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);\n";
- source_out() << " TNode<IntPtrT> "
+ csa_ccfile() << " TNode<Word32T> argc = UncheckedParameter<Word32T>("
+ << "Descriptor::kJSActualArgumentsCount);\n";
+ csa_ccfile() << " TNode<IntPtrT> "
"arguments_length(ChangeInt32ToIntPtr(UncheckedCast<"
"Int32T>(argc)));\n";
- source_out() << " TNode<RawPtrT> arguments_frame = "
+ csa_ccfile() << " TNode<RawPtrT> arguments_frame = "
"UncheckedCast<RawPtrT>(LoadFramePointer());\n";
- source_out() << " TorqueStructArguments "
+ csa_ccfile() << " TorqueStructArguments "
"torque_arguments(GetFrameArguments(arguments_frame, "
"arguments_length));\n";
- source_out()
+ csa_ccfile()
<< " CodeStubArguments arguments(this, torque_arguments);\n";
parameters.Push("torque_arguments.frame");
@@ -468,32 +563,32 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
const Type* actual_type = signature.parameter_types.types[i];
std::vector<const Type*> expected_types;
if (param_name == "context") {
- source_out() << " TNode<NativeContext> " << generated_name
- << " = UncheckedCast<NativeContext>(Parameter("
- << "Descriptor::kContext));\n";
- source_out() << " USE(" << generated_name << ");\n";
+ csa_ccfile() << " TNode<NativeContext> " << generated_name
+ << " = UncheckedParameter<NativeContext>("
+ << "Descriptor::kContext);\n";
+ csa_ccfile() << " USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetNativeContextType(),
TypeOracle::GetContextType()};
} else if (param_name == "receiver") {
- source_out()
+ csa_ccfile()
<< " TNode<Object> " << generated_name << " = "
<< (builtin->IsVarArgsJavaScript()
? "arguments.GetReceiver()"
- : "UncheckedCast<Object>(Parameter(Descriptor::kReceiver))")
+ : "UncheckedParameter<Object>(Descriptor::kReceiver)")
<< ";\n";
- source_out() << "USE(" << generated_name << ");\n";
+ csa_ccfile() << "USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetJSAnyType()};
} else if (param_name == "newTarget") {
- source_out() << " TNode<Object> " << generated_name
- << " = UncheckedCast<Object>(Parameter("
- << "Descriptor::kJSNewTarget));\n";
- source_out() << "USE(" << generated_name << ");\n";
+ csa_ccfile() << " TNode<Object> " << generated_name
+ << " = UncheckedParameter<Object>("
+ << "Descriptor::kJSNewTarget);\n";
+ csa_ccfile() << "USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetJSAnyType()};
} else if (param_name == "target") {
- source_out() << " TNode<JSFunction> " << generated_name
- << " = UncheckedCast<JSFunction>(Parameter("
- << "Descriptor::kJSTarget));\n";
- source_out() << "USE(" << generated_name << ");\n";
+ csa_ccfile() << " TNode<JSFunction> " << generated_name
+ << " = UncheckedParameter<JSFunction>("
+ << "Descriptor::kJSTarget);\n";
+ csa_ccfile() << "USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetJSFunctionType()};
} else {
Error(
@@ -519,12 +614,12 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
const bool mark_as_used = signature.implicit_count > i;
std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
&parameter_bindings, mark_as_used);
- source_out() << " " << type->GetGeneratedTypeName() << " " << var
+ csa_ccfile() << " " << type->GetGeneratedTypeName() << " " << var
<< " = "
- << "UncheckedCast<" << type->GetGeneratedTNodeTypeName()
- << ">(Parameter(Descriptor::k"
- << CamelifyString(parameter_name) << "));\n";
- source_out() << " USE(" << var << ");\n";
+ << "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
+ << ">(Descriptor::k" << CamelifyString(parameter_name)
+ << ");\n";
+ csa_ccfile() << " USE(" << var << ");\n";
}
} else {
@@ -536,18 +631,18 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
const bool mark_as_used = signature.implicit_count > i;
std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
&parameter_bindings, mark_as_used);
- source_out() << " " << type->GetGeneratedTypeName() << " " << var
+ csa_ccfile() << " " << type->GetGeneratedTypeName() << " " << var
<< " = "
- << "UncheckedCast<" << type->GetGeneratedTNodeTypeName()
- << ">(Parameter(";
+ << "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
+ << ">(";
if (i == 0 && has_context_parameter) {
- source_out() << "Descriptor::kContext";
+ csa_ccfile() << "Descriptor::kContext";
} else {
- source_out() << "Descriptor::ParameterIndex<"
+ csa_ccfile() << "Descriptor::ParameterIndex<"
<< (has_context_parameter ? i - 1 : i) << ">()";
}
- source_out() << "));\n";
- source_out() << " USE(" << var << ");\n";
+ csa_ccfile() << ");\n";
+ csa_ccfile() << " USE(" << var << ");\n";
}
}
assembler_ = CfgAssembler(parameter_types);
@@ -555,11 +650,11 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
if (body_result != TypeOracle::GetNeverType()) {
ReportError("control reaches end of builtin, expected return of a value");
}
- CSAGenerator csa_generator{assembler().Result(), source_out(),
+ CSAGenerator csa_generator{assembler().Result(), csa_ccfile(),
builtin->kind()};
csa_generator.EmitGraph(parameters);
assembler_ = base::nullopt;
- source_out() << "}\n\n";
+ csa_ccfile() << "}\n\n";
}
const Type* ImplementationVisitor::Visit(VarDeclarationStatement* stmt) {
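[Editor's sketch] For orientation, the TF_BUILTIN prologue that these emit calls now produce reads roughly as follows once the string literals above are stitched together; the builtin name, the parameter variable names, and the exact whitespace are illustrative, and only the varargs-JavaScript path is shown.

TF_BUILTIN(SomeJSBuiltin, CodeStubAssembler) {
  compiler::CodeAssemblerState* state_ = state();  compiler::CodeAssembler ca_(state());
  TNode<Word32T> argc =
      UncheckedParameter<Word32T>(Descriptor::kJSActualArgumentsCount);
  TNode<IntPtrT> arguments_length(ChangeInt32ToIntPtr(UncheckedCast<Int32T>(argc)));
  TNode<RawPtrT> arguments_frame = UncheckedCast<RawPtrT>(LoadFramePointer());
  TorqueStructArguments torque_arguments(
      GetFrameArguments(arguments_frame, arguments_length));
  CodeStubArguments arguments(this, torque_arguments);
  TNode<NativeContext> parameter0 =
      UncheckedParameter<NativeContext>(Descriptor::kContext);
  USE(parameter0);
  // ...remaining parameters, then the body emitted by CSAGenerator...
}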
@@ -1255,53 +1350,20 @@ InitializerResults ImplementationVisitor::VisitInitializerResults(
LocationReference ImplementationVisitor::GenerateFieldReference(
VisitResult object, const Field& field, const ClassType* class_type) {
+ if (field.index.has_value()) {
+ return LocationReference::HeapSlice(
+ GenerateCall(class_type->GetSliceMacroName(field), {{object}, {}}));
+ }
+ DCHECK(field.offset.has_value());
StackRange result_range = assembler().TopRange(0);
result_range.Extend(GenerateCopy(object).stack_range());
- VisitResult offset;
- if (field.offset.has_value()) {
- offset =
- VisitResult(TypeOracle::GetConstInt31Type(), ToString(*field.offset));
- offset = GenerateImplicitConvert(TypeOracle::GetIntPtrType(), offset);
- } else {
- StackScope stack_scope(this);
- for (const Field& f : class_type->ComputeAllFields()) {
- if (f.offset) {
- offset =
- VisitResult(TypeOracle::GetConstInt31Type(), ToString(*f.offset));
- }
- if (f.name_and_type.name == field.name_and_type.name) break;
- if (f.index) {
- if (!offset.IsOnStack()) {
- offset = GenerateImplicitConvert(TypeOracle::GetIntPtrType(), offset);
- }
- VisitResult array_length = GenerateArrayLength(object, f);
- size_t element_size;
- std::string element_size_string;
- std::tie(element_size, element_size_string) =
- *SizeOf(f.name_and_type.type);
- VisitResult array_element_size =
- VisitResult(TypeOracle::GetConstInt31Type(), element_size_string);
- // In contrast to the code used for allocation, we don't need overflow
- // checks here because we already know all the offsets fit into memory.
- VisitResult array_size =
- GenerateCall("*", {{array_length, array_element_size}, {}});
- offset = GenerateCall("+", {{offset, array_size}, {}});
- }
- }
- DCHECK(offset.IsOnStack());
- offset = stack_scope.Yield(offset);
- }
+ VisitResult offset =
+ VisitResult(TypeOracle::GetConstInt31Type(), ToString(*field.offset));
+ offset = GenerateImplicitConvert(TypeOracle::GetIntPtrType(), offset);
result_range.Extend(offset.stack_range());
- if (field.index) {
- VisitResult length = GenerateArrayLength(object, field);
- result_range.Extend(length.stack_range());
- const Type* slice_type = TypeOracle::GetSliceType(field.name_and_type.type);
- return LocationReference::HeapSlice(VisitResult(slice_type, result_range));
- } else {
- const Type* type = TypeOracle::GetReferenceType(field.name_and_type.type,
- field.const_qualified);
- return LocationReference::HeapReference(VisitResult(type, result_range));
- }
+ const Type* type = TypeOracle::GetReferenceType(field.name_and_type.type,
+ field.const_qualified);
+ return LocationReference::HeapReference(VisitResult(type, result_range));
}
// This is used to generate field references during initialization, where we can
@@ -1625,25 +1687,30 @@ VisitResult ImplementationVisitor::Visit(SpreadExpression* expr) {
void ImplementationVisitor::GenerateImplementation(const std::string& dir) {
for (SourceId file : SourceFileMap::AllSources()) {
- std::string path_from_root =
- SourceFileMap::PathFromV8RootWithoutExtension(file);
-
- std::string new_source(
- GlobalContext::GeneratedPerFile(file).csa_ccfile.str());
+ std::string base_filename =
+ dir + "/" + SourceFileMap::PathFromV8RootWithoutExtension(file);
+ GlobalContext::PerFileStreams& streams =
+ GlobalContext::GeneratedPerFile(file);
- std::string source_file_name = dir + "/" + path_from_root + "-tq-csa.cc";
- WriteFile(source_file_name, new_source);
- std::string new_header(
- GlobalContext::GeneratedPerFile(file).csa_headerfile.str());
- std::string header_file_name = dir + "/" + path_from_root + "-tq-csa.h";
- WriteFile(header_file_name, new_header);
+ WriteFile(base_filename + "-tq-csa.cc", streams.csa_ccfile.str());
+ WriteFile(base_filename + "-tq-csa.h", streams.csa_headerfile.str());
+ WriteFile(base_filename + "-tq.inc",
+ streams.class_definition_headerfile.str());
+ WriteFile(base_filename + "-tq-inl.inc",
+ streams.class_definition_inline_headerfile.str());
+ WriteFile(base_filename + "-tq.cc", streams.class_definition_ccfile.str());
}
+
+ WriteFile(dir + "/runtime-macros.h", runtime_macros_h_.str());
+ WriteFile(dir + "/runtime-macros.cc", runtime_macros_cc_.str());
}
-void ImplementationVisitor::GenerateMacroFunctionDeclaration(
- std::ostream& o, const std::string& macro_prefix, Macro* macro) {
- GenerateFunctionDeclaration(o, macro_prefix, macro->ExternalName(),
- macro->signature(), macro->parameter_names());
+void ImplementationVisitor::GenerateMacroFunctionDeclaration(std::ostream& o,
+ Macro* macro) {
+ GenerateFunctionDeclaration(
+ o, "",
+ output_type_ == OutputType::kCC ? macro->CCName() : macro->ExternalName(),
+ macro->signature(), macro->parameter_names());
}
std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
@@ -1654,12 +1721,17 @@ std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
if (signature.return_type->IsVoidOrNever()) {
o << "void";
} else {
- o << signature.return_type->GetGeneratedTypeName();
+ o << (output_type_ == OutputType::kCC
+ ? signature.return_type->GetRuntimeType()
+ : signature.return_type->GetGeneratedTypeName());
}
o << " " << macro_prefix << name << "(";
bool first = true;
- if (pass_code_assembler_state) {
+ if (output_type_ == OutputType::kCC) {
+ first = false;
+ o << "Isolate* isolate";
+ } else if (pass_code_assembler_state) {
first = false;
o << "compiler::CodeAssemblerState* state_";
}
@@ -1670,7 +1742,9 @@ std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
first = false;
const Type* parameter_type = signature.types()[i];
const std::string& generated_type_name =
- parameter_type->GetGeneratedTypeName();
+ output_type_ == OutputType::kCC
+ ? parameter_type->GetRuntimeType()
+ : parameter_type->GetGeneratedTypeName();
generated_parameter_names.push_back(ExternalParameterName(
i < parameter_names.size() ? parameter_names[i]->value
@@ -1679,6 +1753,9 @@ std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
}
for (const LabelDeclaration& label_info : signature.labels) {
+ if (output_type_ == OutputType::kCC) {
+ ReportError("Macros that generate runtime code can't have label exits");
+ }
if (!first) o << ", ";
first = false;
generated_parameter_names.push_back(
@@ -2487,7 +2564,7 @@ VisitResult ImplementationVisitor::GenerateCall(
}
}
- bool inline_macro = callable->ShouldBeInlined();
+ bool inline_macro = callable->ShouldBeInlined(output_type_);
std::vector<VisitResult> implicit_arguments;
for (size_t i = 0; i < callable->signature().implicit_count; ++i) {
std::string implicit_name = callable->signature().parameter_names[i]->value;
@@ -2594,7 +2671,18 @@ VisitResult ImplementationVisitor::GenerateCall(
if (is_tailcall) {
ReportError("can't tail call a macro");
}
+
macro->SetUsed();
+
+ // If we're currently generating a C++ macro and it's calling another macro,
+ // then we need to make sure that we also generate C++ code for the called
+ // macro.
+ if (output_type_ == OutputType::kCC && !inline_macro) {
+ if (auto* torque_macro = TorqueMacro::DynamicCast(macro)) {
+ GlobalContext::EnsureInCCOutputList(torque_macro);
+ }
+ }
+
if (return_type->IsConstexpr()) {
DCHECK_EQ(0, arguments.labels.size());
std::stringstream result;
@@ -2774,6 +2862,15 @@ VisitResult ImplementationVisitor::GenerateCall(
result << constexpr_arguments[0];
result << ")";
return VisitResult(return_type, result.str());
+ } else if (intrinsic->ExternalName() == "%IndexedFieldLength") {
+ const Type* type = specialization_types[0];
+ const ClassType* class_type = ClassType::DynamicCast(type);
+ if (!class_type) {
+ ReportError("%IndexedFieldLength must take a class type parameter");
+ }
+ const Field& field =
+ class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
+ return GenerateArrayLength(VisitResult(type, argument_range), field);
} else {
assembler().Emit(CallIntrinsicInstruction{intrinsic, specialization_types,
constexpr_arguments});
@@ -3065,6 +3162,7 @@ void ImplementationVisitor::VisitAllDeclarables() {
CurrentCallable::Scope current_callable(nullptr);
const std::vector<std::unique_ptr<Declarable>>& all_declarables =
GlobalContext::AllDeclarables();
+
// This has to be an index-based loop because all_declarables can be extended
// during the loop.
for (size_t i = 0; i < all_declarables.size(); ++i) {
@@ -3074,6 +3172,19 @@ void ImplementationVisitor::VisitAllDeclarables() {
// Recover from compile errors here. The error is recorded already.
}
}
+
+ // Do the same for macros which generate C++ code.
+ output_type_ = OutputType::kCC;
+ const std::vector<TorqueMacro*>& cc_macros =
+ GlobalContext::AllMacrosForCCOutput();
+ for (size_t i = 0; i < cc_macros.size(); ++i) {
+ try {
+ Visit(static_cast<Declarable*>(cc_macros[i]));
+ } catch (TorqueAbortCompilation&) {
+ // Recover from compile errors here. The error is recorded already.
+ }
+ }
+ output_type_ = OutputType::kCSA;
}
void ImplementationVisitor::Visit(Declarable* declarable) {
@@ -3082,7 +3193,7 @@ void ImplementationVisitor::Visit(Declarable* declarable) {
CurrentFileStreams::Scope current_file_streams(
&GlobalContext::GeneratedPerFile(declarable->Position().source));
if (Callable* callable = Callable::DynamicCast(declarable)) {
- if (!callable->ShouldGenerateExternalCode())
+ if (!callable->ShouldGenerateExternalCode(output_type_))
CurrentFileStreams::Get() = nullptr;
}
switch (declarable->kind()) {
@@ -3605,6 +3716,17 @@ base::Optional<std::vector<Field>> GetOrderedUniqueIndexFields(
}
void CppClassGenerator::GenerateClass() {
+ hdr_ << "\n";
+ hdr_ << "// Alias for HeapObject::Is" << name_
+ << "() that avoids inlining.\n";
+ hdr_ << "V8_EXPORT_PRIVATE bool Is" << name_ << "_NonInline(HeapObject o);\n";
+ hdr_ << "\n";
+
+ impl_ << "\n";
+ impl_ << "bool Is" << name_ << "_NonInline(HeapObject o) {\n";
+ impl_ << " return o.Is" << name_ << "();\n";
+ impl_ << "}\n\n";
+
hdr_ << template_decl() << "\n";
hdr_ << "class " << gen_name_ << " : public P {\n";
hdr_ << " static_assert(std::is_same<" << name_ << ", D>::value,\n"
@@ -3707,7 +3829,7 @@ void CppClassGenerator::GenerateClass() {
hdr_ << "};\n\n";
- if (!type_->IsExtern()) {
+ if (type_->ShouldGenerateFullClassDefinition()) {
GenerateClassExport(type_, hdr_, inl_);
}
}
@@ -3732,7 +3854,7 @@ void CppClassGenerator::GenerateClassConstructors() {
<< name_ << ".\");\n";
hdr_ << " }\n";
- hdr_ << "protected:\n";
+ hdr_ << " protected:\n";
hdr_ << " inline explicit " << gen_name_ << "(Address ptr);\n";
hdr_ << " // Special-purpose constructor for subclasses that have fast "
"paths where\n";
@@ -3743,16 +3865,17 @@ void CppClassGenerator::GenerateClassConstructors() {
inl_ << "template<class D, class P>\n";
inl_ << "inline " << gen_name_T_ << "::" << gen_name_ << "(Address ptr)\n";
inl_ << " : P(ptr) {\n";
- inl_ << " SLOW_DCHECK(this->Is" << name_ << "());\n";
+ inl_ << " SLOW_DCHECK(Is" << name_ << "_NonInline(*this));\n";
inl_ << "}\n";
inl_ << "template<class D, class P>\n";
inl_ << "inline " << gen_name_T_ << "::" << gen_name_
<< "(Address ptr, HeapObject::AllowInlineSmiStorage allow_smi)\n";
inl_ << " : P(ptr, allow_smi) {\n";
- inl_ << " SLOW_DCHECK((allow_smi == "
- "HeapObject::AllowInlineSmiStorage::kAllowBeingASmi && "
- << "this->IsSmi()) || this->Is" << name_ << "());\n";
+ inl_ << " SLOW_DCHECK("
+ << "(allow_smi == HeapObject::AllowInlineSmiStorage::kAllowBeingASmi"
+ " && this->IsSmi()) || Is"
+ << name_ << "_NonInline(*this));\n";
inl_ << "}\n";
}
@@ -3767,15 +3890,15 @@ std::string GenerateRuntimeTypeCheck(const Type* type,
type_check << value << ".IsCleared()";
at_start = false;
}
- for (const RuntimeType& runtime_type : type->GetRuntimeTypes()) {
+ for (const TypeChecker& runtime_type : type->GetTypeCheckers()) {
if (!at_start) type_check << " || ";
at_start = false;
if (maybe_object) {
bool strong = runtime_type.weak_ref_to.empty();
- if (strong && runtime_type.type == "MaybeObject") {
- // Rather than a generic Weak<T>, this is a basic type Tagged or
- // WeakHeapObject. We can't validate anything more about the type of
- // the object pointed to, so just check that it's weak.
+ if (strong && runtime_type.type == WEAK_HEAP_OBJECT) {
+ // Rather than a generic Weak<T>, this is the basic type WeakHeapObject.
+ // We can't validate anything more about the type of the object pointed
+ // to, so just check that it's weak.
type_check << value << ".IsWeak()";
} else {
type_check << "(" << (strong ? "!" : "") << value << ".IsWeak() && "
@@ -3954,7 +4077,7 @@ void CppClassGenerator::GenerateFieldAccessorForTagged(const Field& f) {
std::string offset = "k" + CamelifyString(name) + "Offset";
bool strong_pointer = field_type->IsSubtypeOf(TypeOracle::GetObjectType());
- std::string type = field_type->GetRuntimeType();
+ std::string type = field_type->UnhandlifiedCppTypeName();
// Generate declarations in header.
if (!field_type->IsClassType() && field_type != TypeOracle::GetObjectType()) {
hdr_ << " // Torque type: " << field_type->ToString() << "\n";
@@ -3962,7 +4085,7 @@ void CppClassGenerator::GenerateFieldAccessorForTagged(const Field& f) {
hdr_ << " inline " << type << " " << name << "(" << (f.index ? "int i" : "")
<< ") const;\n";
- hdr_ << " inline " << type << " " << name << "(const Isolate* isolates"
+ hdr_ << " inline " << type << " " << name << "(IsolateRoot isolates"
<< (f.index ? ", int i" : "") << ") const;\n";
hdr_ << " inline void set_" << name << "(" << (f.index ? "int i, " : "")
<< type << " value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);\n\n";
@@ -3973,15 +4096,14 @@ void CppClassGenerator::GenerateFieldAccessorForTagged(const Field& f) {
inl_ << "template <class D, class P>\n";
inl_ << type << " " << gen_name_ << "<D, P>::" << name << "("
<< (f.index ? "int i" : "") << ") const {\n";
- inl_ << " const Isolate* isolate = GetIsolateForPtrCompr(*this);\n";
+ inl_ << " IsolateRoot isolate = GetIsolateForPtrCompr(*this);\n";
inl_ << " return " << gen_name_ << "::" << name << "(isolate"
<< (f.index ? ", i" : "") << ");\n";
inl_ << "}\n";
inl_ << "template <class D, class P>\n";
inl_ << type << " " << gen_name_ << "<D, P>::" << name
- << "(const Isolate* isolate" << (f.index ? ", int i" : "")
- << ") const {\n";
+ << "(IsolateRoot isolate" << (f.index ? ", int i" : "") << ") const {\n";
// TODO(tebbi): The distinction between relaxed and non-relaxed accesses here
// is pretty arbitrary and just tries to preserve what was there before.
@@ -4031,35 +4153,6 @@ void CppClassGenerator::GenerateFieldAccessorForTagged(const Field& f) {
inl_ << "}\n\n";
}
-void EmitClassDefinitionHeadersIncludes(const std::string& basename,
- std::stringstream& header,
- std::stringstream& inline_header) {
- header << "#include \"src/objects/objects.h\"\n";
- header << "#include \"src/objects/heap-object.h\"\n";
- header << "#include \"src/objects/smi.h\"\n";
- header << "#include \"torque-generated/field-offsets.h\"\n";
- header << "#include <type_traits>\n\n";
-
- inline_header << "#include \"torque-generated/class-definitions.h\"\n";
- inline_header << "#include \"src/objects/js-function.h\"\n";
- inline_header << "#include \"src/objects/js-objects.h\"\n";
- inline_header << "#include \"src/objects/js-promise.h\"\n";
- inline_header << "#include \"src/objects/js-weak-refs.h\"\n";
- inline_header << "#include \"src/objects/module.h\"\n";
- inline_header << "#include \"src/objects/objects-inl.h\"\n";
- inline_header << "#include \"src/objects/script.h\"\n";
- inline_header << "#include \"src/objects/shared-function-info.h\"\n";
- inline_header << "#include \"src/objects/tagged-field.h\"\n\n";
-}
-
-void EmitClassDefinitionHeadersForwardDeclarations(std::stringstream& header) {
- // Generate forward declarations for every class.
- for (const ClassType* type : TypeOracle::GetClasses()) {
- header << "class " << type->GetGeneratedTNodeTypeName() << ";\n";
- }
- header << "using BuiltinPtr = Smi;\n\n";
-}
-
void GenerateStructLayoutDescription(std::ostream& header,
const StructType* type) {
header << "struct TorqueGenerated" << CamelifyString(type->name())
@@ -4077,125 +4170,45 @@ void GenerateStructLayoutDescription(std::ostream& header,
void ImplementationVisitor::GenerateClassDefinitions(
const std::string& output_directory) {
- std::stringstream external_header;
- std::stringstream inline_external_header;
- std::stringstream internal_header;
- std::stringstream inline_internal_header;
- std::stringstream exported_header;
- std::stringstream inline_exported_header;
- std::stringstream implementation;
std::stringstream factory_header;
std::stringstream factory_impl;
- std::string basename = "class-definitions";
- std::string internal_basename = "internal-" + basename;
- std::string exported_basename = "exported-" + basename;
- std::string file_basename = output_directory + "/" + basename;
- std::string internal_file_basename =
- output_directory + "/" + internal_basename;
- std::string exported_file_basename =
- output_directory + "/" + exported_basename;
std::string factory_basename = "factory";
- std::string factory_file_basename = output_directory + "/" + factory_basename;
-
- {
- IncludeGuardScope header_guard(external_header, basename + ".h");
-
- IncludeGuardScope inline_header_guard(inline_external_header,
- basename + "-inl.h");
-
- IncludeGuardScope internal_header_guard(internal_header,
- internal_basename + ".h");
-
- IncludeGuardScope internal_inline_header_guard(
- inline_internal_header, internal_basename + "-inl.h");
-
- IncludeGuardScope exported_header_guard(exported_header,
- exported_basename + ".h");
-
- IncludeGuardScope exported_inline_header_guard(
- inline_exported_header, exported_basename + "-inl.h");
-
- internal_header << "#include \"torque-generated/class-definitions.h\"\n";
- internal_header << "#include \"src/objects/fixed-array.h\"\n";
- inline_internal_header
- << "#include \"torque-generated/internal-class-definitions.h\"\n";
- inline_internal_header
- << "#include \"torque-generated/class-definitions-inl.h\"\n";
-
- exported_header << "#include \"src/objects/fixed-array.h\"\n";
- exported_header << "#include \"torque-generated/class-definitions.h\"\n";
- inline_exported_header
- << "#include \"torque-generated/exported-class-definitions.h\"\n";
- inline_exported_header << "#include \"src/objects/fixed-array-inl.h\"\n";
-
- EmitClassDefinitionHeadersIncludes(basename, external_header,
- inline_external_header);
-
- EmitClassDefinitionHeadersIncludes(internal_basename, internal_header,
- inline_internal_header);
-
- IncludeObjectMacrosScope header_macros(external_header);
- IncludeObjectMacrosScope inline_header_macros(inline_external_header);
-
- IncludeObjectMacrosScope internal_header_macros(internal_header);
- IncludeObjectMacrosScope internal_inline_header_macros(
- inline_internal_header);
- IncludeObjectMacrosScope exported_header_macros(exported_header);
- IncludeObjectMacrosScope exported_inline_header_macros(
- inline_exported_header);
-
- NamespaceScope header_namespaces(external_header, {"v8", "internal"});
- NamespaceScope inline_header_namespaces(inline_external_header,
- {"v8", "internal"});
- NamespaceScope internal_header_namespaces(internal_header,
- {"v8", "internal"});
- NamespaceScope internal_inline_header_namespaces(inline_internal_header,
- {"v8", "internal"});
- NamespaceScope exported_header_namespaces(exported_header,
- {"v8", "internal"});
- NamespaceScope exported_inline_header_namespaces(inline_exported_header,
- {"v8", "internal"});
-
- EmitClassDefinitionHeadersForwardDeclarations(external_header);
- EmitClassDefinitionHeadersForwardDeclarations(internal_header);
+ std::stringstream forward_declarations;
+ std::string forward_declarations_filename = "class-forward-declarations.h";
+ {
factory_impl << "#include \"src/heap/factory.h\"\n";
factory_impl << "#include \"src/heap/factory-inl.h\"\n";
factory_impl << "#include \"src/heap/heap.h\"\n";
factory_impl << "#include \"src/heap/heap-inl.h\"\n";
- factory_impl << "#include \"src/execution/isolate.h\"\n\n";
- factory_impl << "#include "
- "\"torque-generated/internal-class-definitions-inl.h\"\n\n";
+ factory_impl << "#include \"src/execution/isolate.h\"\n";
factory_impl << "#include "
- "\"torque-generated/exported-class-definitions-inl.h\"\n\n";
+ "\"src/objects/all-objects-inl.h\"\n\n";
NamespaceScope factory_impl_namespaces(factory_impl, {"v8", "internal"});
factory_impl << "\n";
- implementation << "#include \"torque-generated/class-definitions.h\"\n\n";
- implementation << "#include \"torque-generated/class-verifiers.h\"\n\n";
- implementation
- << "#include \"src/objects/class-definitions-tq-deps-inl.h\"\n\n";
- implementation
- << "#include "
- "\"torque-generated/internal-class-definitions-inl.h\"\n\n";
- implementation
- << "#include "
- "\"torque-generated/exported-class-definitions-inl.h\"\n\n";
- NamespaceScope implementation_namespaces(implementation,
- {"v8", "internal"});
+ IncludeGuardScope include_guard(forward_declarations,
+ forward_declarations_filename);
+ NamespaceScope forward_declarations_namespaces(forward_declarations,
+ {"v8", "internal"});
std::set<const StructType*, TypeLess> structs_used_in_classes;
+ // Emit forward declarations.
+ for (const ClassType* type : TypeOracle::GetClasses()) {
+ auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
+ std::ostream& header = streams.class_definition_headerfile;
+ header << "class " << type->GetGeneratedTNodeTypeName() << ";\n";
+ forward_declarations << "class " << type->GetGeneratedTNodeTypeName()
+ << ";\n";
+ }
+
for (const ClassType* type : TypeOracle::GetClasses()) {
- std::stringstream& header =
- type->IsExtern()
- ? external_header
- : type->ShouldExport() ? exported_header : internal_header;
- std::stringstream& inline_header =
- type->IsExtern() ? inline_external_header
- : type->ShouldExport() ? inline_exported_header
- : inline_internal_header;
+ auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
+ std::ostream& header = streams.class_definition_headerfile;
+ std::ostream& inline_header = streams.class_definition_inline_headerfile;
+ std::ostream& implementation = streams.class_definition_ccfile;
if (type->GenerateCppClassDefinitions()) {
CppClassGenerator g(type, header, inline_header, implementation);
@@ -4207,7 +4220,8 @@ void ImplementationVisitor::GenerateClassDefinitions(
structs_used_in_classes.insert(*field_as_struct);
}
}
- if (type->ShouldExport() && !type->IsAbstract()) {
+ if (type->ShouldExport() && !type->IsAbstract() &&
+ !type->HasCustomMap()) {
factory_header << type->HandlifiedCppTypeName() << " New"
<< type->name() << "(";
factory_impl << type->HandlifiedCppTypeName() << " Factory::New"
@@ -4251,9 +4265,12 @@ void ImplementationVisitor::GenerateClassDefinitions(
factory_impl << " "
"isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>"
"(size, allocation_type);\n";
+ factory_impl << " WriteBarrierMode write_barrier_mode =\n"
+ << " allocation_type == AllocationType::kYoung\n"
+ << " ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;\n";
factory_impl << " result.set_map_after_allocation(roots."
<< SnakeifyString(type->name())
- << "_map(), SKIP_WRITE_BARRIER);\n";
+ << "_map(), write_barrier_mode);\n";
factory_impl << " " << type->HandlifiedCppTypeName()
<< " result_handle(" << type->name()
<< "::cast(result), isolate());\n";
@@ -4267,7 +4284,7 @@ void ImplementationVisitor::GenerateClassDefinitions(
TypeOracle::GetTaggedType()) &&
!f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType())) {
factory_impl << "*" << f.name_and_type.name
- << ", SKIP_WRITE_BARRIER";
+ << ", write_barrier_mode";
} else {
factory_impl << f.name_and_type.name;
}
@@ -4281,20 +4298,20 @@ void ImplementationVisitor::GenerateClassDefinitions(
}
for (const StructType* type : structs_used_in_classes) {
+ std::ostream& header =
+ GlobalContext::GeneratedPerFile(type->GetPosition().source)
+ .class_definition_headerfile;
if (type != TypeOracle::GetFloat64OrHoleType()) {
- GenerateStructLayoutDescription(external_header, type);
+ GenerateStructLayoutDescription(header, type);
}
}
}
- WriteFile(file_basename + ".h", external_header.str());
- WriteFile(file_basename + "-inl.h", inline_external_header.str());
- WriteFile(file_basename + ".cc", implementation.str());
- WriteFile(internal_file_basename + ".h", internal_header.str());
- WriteFile(internal_file_basename + "-inl.h", inline_internal_header.str());
- WriteFile(exported_file_basename + ".h", exported_header.str());
- WriteFile(exported_file_basename + "-inl.h", inline_exported_header.str());
- WriteFile(factory_file_basename + ".inc", factory_header.str());
- WriteFile(factory_file_basename + ".cc", factory_impl.str());
+ WriteFile(output_directory + "/" + factory_basename + ".inc",
+ factory_header.str());
+ WriteFile(output_directory + "/" + factory_basename + ".cc",
+ factory_impl.str());
+ WriteFile(output_directory + "/" + forward_declarations_filename,
+ forward_declarations.str());
}
namespace {
@@ -4305,7 +4322,7 @@ void GeneratePrintDefinitionsForClass(std::ostream& impl, const ClassType* type,
impl << template_params << "\n";
impl << "void " << gen_name_T << "::" << type->name()
<< "Print(std::ostream& os) {\n";
- impl << " this->PrintHeader(os, \"" << gen_name << "\");\n";
+ impl << " this->PrintHeader(os, \"" << type->name() << "\");\n";
auto hierarchy = type->GetHierarchy();
std::map<std::string, const AggregateType*> field_names;
for (const AggregateType* aggregate_type : hierarchy) {
@@ -4340,14 +4357,8 @@ void ImplementationVisitor::GeneratePrintDefinitions(
{
IfDefScope object_print(impl, "OBJECT_PRINT");
- impl << "#include \"src/objects/objects.h\"\n\n";
impl << "#include <iosfwd>\n\n";
- impl << "#include "
- "\"torque-generated/internal-class-definitions-inl.h\"\n";
- impl << "#include "
- "\"torque-generated/exported-class-definitions-inl.h\"\n";
- impl << "#include \"src/objects/struct-inl.h\"\n\n";
- impl << "#include \"src/objects/template-objects-inl.h\"\n\n";
+ impl << "#include \"src/objects/all-objects-inl.h\"\n\n";
NamespaceScope impl_namespaces(impl, {"v8", "internal"});
@@ -4532,8 +4543,10 @@ void ImplementationVisitor::GenerateBodyDescriptors(
if (type->size().SingleValue()) {
h_contents << " return " << *type->size().SingleValue() << ";\n";
} else {
+ // We use an unchecked_cast here because this is used for concurrent
+ // marking, where we shouldn't re-read the map.
h_contents << " return " << name
- << "::cast(raw_object).AllocatedSize();\n";
+ << "::unchecked_cast(raw_object).AllocatedSize();\n";
}
h_contents << " }\n\n";
@@ -4548,10 +4561,9 @@ namespace {
// Generate verification code for a single piece of class data, which might be
// nested within a struct or might be a single element in an indexed field (or
// both).
-void GenerateFieldValueVerifier(const std::string& class_name,
- const Field& class_field,
- const Field& leaf_field, size_t struct_offset,
- std::string field_size,
+void GenerateFieldValueVerifier(const std::string& class_name, bool indexed,
+ std::string offset, const Field& leaf_field,
+ std::string indexed_field_size,
std::ostream& cc_contents) {
const Type* field_type = leaf_field.name_and_type.type;
@@ -4560,17 +4572,15 @@ void GenerateFieldValueVerifier(const std::string& class_name,
const char* object_type = maybe_object ? "MaybeObject" : "Object";
const char* verify_fn =
maybe_object ? "VerifyMaybeObjectPointer" : "VerifyPointer";
- std::string index_offset = std::to_string(struct_offset);
- if (class_field.index) {
- index_offset += " + i * " + field_size;
+ if (indexed) {
+ offset += " + i * " + indexed_field_size;
}
// Name the local var based on the field name for nicer CHECK output.
const std::string value = leaf_field.name_and_type.name + "__value";
// Read the field.
cc_contents << " " << object_type << " " << value << " = TaggedField<"
- << object_type << ", " << *class_field.offset << ">::load(o, "
- << index_offset << ");\n";
+ << object_type << ">::load(o, " << offset << ");\n";
// Call VerifyPointer or VerifyMaybeObjectPointer on it.
cc_contents << " " << object_type << "::" << verify_fn << "(isolate, "
@@ -4601,49 +4611,49 @@ void GenerateClassFieldVerifier(const std::string& class_name,
// Do not verify if the field may be uninitialized.
if (TypeOracle::GetUninitializedType()->IsSubtypeOf(field_type)) return;
+ std::string field_start_offset;
if (f.index) {
- base::Optional<NameAndType> array_length =
- ExtractSimpleFieldArraySize(class_type, *f.index);
- if (!array_length) {
- Error("Cannot generate verifier for array field with complex length.")
- .Position((*f.index)->pos)
- .Throw();
- }
-
- std::string length_field_offset =
- class_name + "::k" + CamelifyString(array_length->name) + "Offset";
- cc_contents << " for (int i = 0; i < ";
- if (array_length->type == TypeOracle::GetSmiType()) {
- // We already verified the index field because it was listed earlier, so
- // we can assume it's safe to read here.
- cc_contents << "TaggedField<Smi, " << length_field_offset
- << ">::load(o).value()";
- } else {
- const Type* constexpr_version = array_length->type->ConstexprVersion();
- if (constexpr_version == nullptr) {
- Error("constexpr representation for type ",
- array_length->type->ToString(),
- " is required due to usage as index")
- .Position(f.pos);
- }
- cc_contents << "o.ReadField<" << constexpr_version->GetGeneratedTypeName()
- << ">(" << length_field_offset << ")";
- }
- cc_contents << "; ++i) {\n";
+ field_start_offset = f.name_and_type.name + "__offset";
+ std::string length = f.name_and_type.name + "__length";
+ cc_contents << " intptr_t " << field_start_offset << ", " << length
+ << ";\n";
+ cc_contents << " std::tie(std::ignore, " << field_start_offset << ", "
+ << length << ") = "
+ << Callable::PrefixNameForCCOutput(
+ class_type.GetSliceMacroName(f))
+ << "(isolate, o);\n";
+
+ // Slices use intptr, but TaggedField<T>.load() uses int, so verify that
+ // such a cast is valid.
+ cc_contents << " CHECK_EQ(" << field_start_offset << ", static_cast<int>("
+ << field_start_offset << "));\n";
+ cc_contents << " CHECK_EQ(" << length << ", static_cast<int>(" << length
+ << "));\n";
+ field_start_offset = "static_cast<int>(" + field_start_offset + ")";
+ length = "static_cast<int>(" + length + ")";
+
+ cc_contents << " for (int i = 0; i < " << length << "; ++i) {\n";
} else {
+ // Non-indexed fields have known offsets.
+ field_start_offset = std::to_string(*f.offset);
cc_contents << " {\n";
}
if (auto struct_type = field_type->StructSupertype()) {
- for (const Field& field : (*struct_type)->fields()) {
- if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- GenerateFieldValueVerifier(class_name, f, field, *field.offset,
- std::to_string((*struct_type)->PackedSize()),
- cc_contents);
+ for (const Field& struct_field : (*struct_type)->fields()) {
+ if (struct_field.name_and_type.type->IsSubtypeOf(
+ TypeOracle::GetTaggedType())) {
+ GenerateFieldValueVerifier(
+ class_name, f.index.has_value(),
+ field_start_offset + " + " + std::to_string(*struct_field.offset),
+ struct_field, std::to_string((*struct_type)->PackedSize()),
+ cc_contents);
}
}
} else {
- GenerateFieldValueVerifier(class_name, f, f, 0, "kTaggedSize", cc_contents);
+ GenerateFieldValueVerifier(class_name, f.index.has_value(),
+ field_start_offset, f, "kTaggedSize",
+ cc_contents);
}
cc_contents << " }\n";
@@ -4668,9 +4678,8 @@ void ImplementationVisitor::GenerateClassVerifiers(
}
cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
cc_contents << "#include "
- "\"torque-generated/internal-class-definitions-inl.h\"\n";
- cc_contents << "#include "
- "\"torque-generated/exported-class-definitions-inl.h\"\n";
+ "\"src/objects/all-objects-inl.h\"\n";
+ cc_contents << "#include \"torque-generated/runtime-macros.h\"\n";
IncludeObjectMacrosScope object_macros(cc_contents);
@@ -4781,10 +4790,6 @@ void ImplementationVisitor::GenerateExportedMacrosAssembler(
h_contents << "#include \"src/compiler/code-assembler.h\"\n";
h_contents << "#include \"src/execution/frames.h\"\n";
h_contents << "#include \"torque-generated/csa-types.h\"\n";
- h_contents
- << "#include \"torque-generated/internal-class-definitions.h\"\n";
- h_contents
- << "#include \"torque-generated/exported-class-definitions.h\"\n";
cc_contents << "#include \"src/objects/fixed-array-inl.h\"\n";
cc_contents << "#include \"src/objects/free-space.h\"\n";
cc_contents << "#include \"src/objects/js-regexp-string-iterator.h\"\n";
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index 960f931435..8846b43502 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -552,8 +552,12 @@ class ImplementationVisitor {
const Type* Visit(DebugStatement* stmt);
const Type* Visit(AssertStatement* stmt);
- void BeginCSAFiles();
- void EndCSAFiles();
+ void BeginGeneratedFiles();
+ void EndGeneratedFiles();
+ // TODO(tebbi): Switch to per-file generation for runtime macros and merge
+ // these functions into {Begin,End}GeneratedFiles().
+ void BeginRuntimeMacrosFile();
+ void EndRuntimeMacrosFile();
void GenerateImplementation(const std::string& dir);
@@ -727,7 +731,6 @@ class ImplementationVisitor {
Block* false_block);
void GenerateMacroFunctionDeclaration(std::ostream& o,
- const std::string& macro_prefix,
Macro* macro);
std::vector<std::string> GenerateFunctionDeclaration(
std::ostream& o, const std::string& macro_prefix, const std::string& name,
@@ -760,18 +763,39 @@ class ImplementationVisitor {
size_t i);
std::string ExternalParameterName(const std::string& name);
- std::ostream& source_out() {
+ std::ostream& csa_ccfile() {
if (auto* streams = CurrentFileStreams::Get()) {
- return streams->csa_ccfile;
+ return output_type_ == OutputType::kCSA ? streams->csa_ccfile
+ : runtime_macros_cc_;
}
return null_stream_;
}
- std::ostream& header_out() {
+ std::ostream& csa_headerfile() {
if (auto* streams = CurrentFileStreams::Get()) {
- return streams->csa_headerfile;
+ return output_type_ == OutputType::kCSA ? streams->csa_headerfile
+ : runtime_macros_h_;
}
return null_stream_;
}
+ std::ostream& class_definition_headerfile() {
+ if (auto* streams = CurrentFileStreams::Get()) {
+ return streams->class_definition_headerfile;
+ }
+ return null_stream_;
+ }
+ std::ostream& class_definition_inline_headerfile() {
+ if (auto* streams = CurrentFileStreams::Get()) {
+ return streams->class_definition_inline_headerfile;
+ }
+ return null_stream_;
+ }
+ std::ostream& class_definition_ccfile() {
+ if (auto* streams = CurrentFileStreams::Get()) {
+ return streams->class_definition_ccfile;
+ }
+ return null_stream_;
+ }
+
CfgAssembler& assembler() { return *assembler_; }
void SetReturnValue(VisitResult return_value) {
@@ -818,6 +842,16 @@ class ImplementationVisitor {
// the value to load.
std::unordered_map<const Expression*, const Identifier*>
bitfield_expressions_;
+
+ // The contents of the runtime macros output files. These contain all Torque
+ // macros that have been generated using the C++ backend. They're not yet
+ // split per source file like CSA macros, but eventually we should change them
+ // to generate -inl.inc files so that callers can easily inline their
+ // contents.
+ std::stringstream runtime_macros_cc_;
+ std::stringstream runtime_macros_h_;
+
+ OutputType output_type_ = OutputType::kCSA;
};
void ReportAllUnusedMacros();
diff --git a/deps/v8/src/torque/instance-type-generator.cc b/deps/v8/src/torque/instance-type-generator.cc
index cb45a7d801..1e2423deba 100644
--- a/deps/v8/src/torque/instance-type-generator.cc
+++ b/deps/v8/src/torque/instance-type-generator.cc
@@ -451,7 +451,7 @@ void ImplementationVisitor::GenerateInstanceTypes(
if (type->IsExtern()) continue;
torque_defined_class_list << " V(" << upper_case_name << ") \\\n";
- if (type->IsAbstract()) continue;
+ if (type->IsAbstract() || type->HasCustomMap()) continue;
torque_defined_map_csa_list << " V(_, " << upper_case_name << "Map, "
<< lower_case_name << "_map, "
<< upper_case_name << ") \\\n";
diff --git a/deps/v8/src/torque/instructions.h b/deps/v8/src/torque/instructions.h
index 528d5c742e..69dfbd8fc3 100644
--- a/deps/v8/src/torque/instructions.h
+++ b/deps/v8/src/torque/instructions.h
@@ -24,32 +24,40 @@ class Macro;
class NamespaceConstant;
class RuntimeFunction;
-#define TORQUE_INSTRUCTION_LIST(V) \
- V(PeekInstruction) \
- V(PokeInstruction) \
- V(DeleteRangeInstruction) \
- V(PushUninitializedInstruction) \
- V(PushBuiltinPointerInstruction) \
- V(LoadReferenceInstruction) \
- V(StoreReferenceInstruction) \
- V(LoadBitFieldInstruction) \
- V(StoreBitFieldInstruction) \
- V(CallCsaMacroInstruction) \
- V(CallIntrinsicInstruction) \
- V(NamespaceConstantInstruction) \
- V(CallCsaMacroAndBranchInstruction) \
- V(CallBuiltinInstruction) \
- V(CallRuntimeInstruction) \
- V(CallBuiltinPointerInstruction) \
- V(BranchInstruction) \
- V(ConstexprBranchInstruction) \
- V(GotoInstruction) \
- V(GotoExternalInstruction) \
- V(ReturnInstruction) \
- V(PrintConstantStringInstruction) \
- V(AbortInstruction) \
+// Instructions where all backends generate code the same way.
+#define TORQUE_BACKEND_AGNOSTIC_INSTRUCTION_LIST(V) \
+ V(PeekInstruction) \
+ V(PokeInstruction) \
+ V(DeleteRangeInstruction)
+
+// Instructions where different backends may generate different code.
+#define TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(V) \
+ V(PushUninitializedInstruction) \
+ V(PushBuiltinPointerInstruction) \
+ V(LoadReferenceInstruction) \
+ V(StoreReferenceInstruction) \
+ V(LoadBitFieldInstruction) \
+ V(StoreBitFieldInstruction) \
+ V(CallCsaMacroInstruction) \
+ V(CallIntrinsicInstruction) \
+ V(NamespaceConstantInstruction) \
+ V(CallCsaMacroAndBranchInstruction) \
+ V(CallBuiltinInstruction) \
+ V(CallRuntimeInstruction) \
+ V(CallBuiltinPointerInstruction) \
+ V(BranchInstruction) \
+ V(ConstexprBranchInstruction) \
+ V(GotoInstruction) \
+ V(GotoExternalInstruction) \
+ V(ReturnInstruction) \
+ V(PrintConstantStringInstruction) \
+ V(AbortInstruction) \
V(UnsafeCastInstruction)
+#define TORQUE_INSTRUCTION_LIST(V) \
+ TORQUE_BACKEND_AGNOSTIC_INSTRUCTION_LIST(V) \
+ TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(V)
+
#define TORQUE_INSTRUCTION_BOILERPLATE() \
static const InstructionKind kKind; \
std::unique_ptr<InstructionBase> Clone() const override; \
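[Editor's sketch] The split into backend-agnostic and backend-dependent lists is a plain X-macro technique. A minimal, self-contained illustration with toy instruction names (not the real Torque set) of how one composed list can drive an enum while the two sublists still get different treatment:

#include <iostream>

#define SHARED_LIST(V) V(Peek) V(Poke)       // same handling in every backend
#define BACKEND_LIST(V) V(Branch) V(Return)  // handling differs per backend
#define ALL_LIST(V) SHARED_LIST(V) BACKEND_LIST(V)

// One enum is generated from the composed list...
enum class Kind {
#define ENUM_ITEM(T) k##T,
  ALL_LIST(ENUM_ITEM)
#undef ENUM_ITEM
};

// ...while dispatch code can still expand over each sublist separately.
const char* Describe(Kind kind) {
  switch (kind) {
#define SHARED_ITEM(T) \
  case Kind::k##T:     \
    return #T " (backend-agnostic)";
    SHARED_LIST(SHARED_ITEM)
#undef SHARED_ITEM
#define BACKEND_ITEM(T) \
  case Kind::k##T:      \
    return #T " (backend-dependent)";
    BACKEND_LIST(BACKEND_ITEM)
#undef BACKEND_ITEM
  }
  return "unknown";
}

int main() {
  std::cout << Describe(Kind::kPeek) << "\n" << Describe(Kind::kBranch) << "\n";
}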
diff --git a/deps/v8/src/torque/runtime-macro-shims.h b/deps/v8/src/torque/runtime-macro-shims.h
new file mode 100644
index 0000000000..89e566bc62
--- /dev/null
+++ b/deps/v8/src/torque/runtime-macro-shims.h
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains runtime implementations of a few macros that are defined
+// as external in Torque, so that generated runtime code can work.
+
+#ifndef V8_TORQUE_RUNTIME_MACRO_SHIMS_H_
+#define V8_TORQUE_RUNTIME_MACRO_SHIMS_H_
+
+#include "src/objects/smi.h"
+
+namespace v8 {
+namespace internal {
+namespace TorqueRuntimeMacroShims {
+namespace CodeStubAssembler {
+
+inline intptr_t ChangeInt32ToIntPtr(Isolate* isolate, int32_t i) { return i; }
+inline uintptr_t ChangeUint32ToWord(Isolate* isolate, uint32_t u) { return u; }
+inline intptr_t IntPtrAdd(Isolate* isolate, intptr_t a, intptr_t b) {
+ return a + b;
+}
+inline intptr_t IntPtrMul(Isolate* isolate, intptr_t a, intptr_t b) {
+ return a * b;
+}
+inline intptr_t Signed(Isolate* isolate, uintptr_t u) {
+ return static_cast<intptr_t>(u);
+}
+inline int32_t SmiUntag(Isolate* isolate, Smi s) { return s.value(); }
+
+} // namespace CodeStubAssembler
+} // namespace TorqueRuntimeMacroShims
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_RUNTIME_MACRO_SHIMS_H_
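[Editor's sketch] A hedged usage example, assuming this header and the usual V8 globals are included: the surrounding function and its variable names are made up, but the shim calls match the signatures declared above. These particular shims ignore the Isolate* argument; generated code threads it through anyway so CSA intrinsics and their runtime counterparts share one calling convention.

intptr_t ElementOffset(Isolate* isolate, Smi tagged_index, intptr_t header_size) {
  namespace Shims = TorqueRuntimeMacroShims::CodeStubAssembler;
  // Untag the Smi index, widen it to intptr_t, then scale and offset it.
  intptr_t index =
      Shims::ChangeInt32ToIntPtr(isolate, Shims::SmiUntag(isolate, tagged_index));
  return Shims::IntPtrAdd(isolate, header_size,
                          Shims::IntPtrMul(isolate, index, kTaggedSize));
}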
diff --git a/deps/v8/src/torque/torque-code-generator.cc b/deps/v8/src/torque/torque-code-generator.cc
new file mode 100644
index 0000000000..46763be468
--- /dev/null
+++ b/deps/v8/src/torque/torque-code-generator.cc
@@ -0,0 +1,60 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/torque-code-generator.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+bool TorqueCodeGenerator::IsEmptyInstruction(const Instruction& instruction) {
+ switch (instruction.kind()) {
+ case InstructionKind::kPeekInstruction:
+ case InstructionKind::kPokeInstruction:
+ case InstructionKind::kDeleteRangeInstruction:
+ case InstructionKind::kPushUninitializedInstruction:
+ case InstructionKind::kPushBuiltinPointerInstruction:
+ case InstructionKind::kUnsafeCastInstruction:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void TorqueCodeGenerator::EmitInstruction(const Instruction& instruction,
+ Stack<std::string>* stack) {
+#ifdef DEBUG
+ if (!IsEmptyInstruction(instruction)) {
+ EmitSourcePosition(instruction->pos);
+ }
+#endif
+
+ switch (instruction.kind()) {
+#define ENUM_ITEM(T) \
+ case InstructionKind::k##T: \
+ return EmitInstruction(instruction.Cast<T>(), stack);
+ TORQUE_INSTRUCTION_LIST(ENUM_ITEM)
+#undef ENUM_ITEM
+ }
+}
+
+void TorqueCodeGenerator::EmitInstruction(const PeekInstruction& instruction,
+ Stack<std::string>* stack) {
+ stack->Push(stack->Peek(instruction.slot));
+}
+
+void TorqueCodeGenerator::EmitInstruction(const PokeInstruction& instruction,
+ Stack<std::string>* stack) {
+ stack->Poke(instruction.slot, stack->Top());
+ stack->Pop();
+}
+
+void TorqueCodeGenerator::EmitInstruction(
+ const DeleteRangeInstruction& instruction, Stack<std::string>* stack) {
+ stack->DeleteRange(instruction.range);
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/torque/torque-code-generator.h b/deps/v8/src/torque/torque-code-generator.h
new file mode 100644
index 0000000000..ddbd5309c9
--- /dev/null
+++ b/deps/v8/src/torque/torque-code-generator.h
@@ -0,0 +1,93 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_TORQUE_CODE_GENERATOR_H_
+#define V8_TORQUE_TORQUE_CODE_GENERATOR_H_
+
+#include <iostream>
+
+#include "src/torque/cfg.h"
+#include "src/torque/declarable.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+class TorqueCodeGenerator {
+ public:
+ TorqueCodeGenerator(const ControlFlowGraph& cfg, std::ostream& out)
+ : cfg_(cfg),
+ out_(&out),
+ out_decls_(&out),
+ previous_position_(SourcePosition::Invalid()) {}
+
+ protected:
+ const ControlFlowGraph& cfg_;
+ std::ostream* out_;
+ std::ostream* out_decls_;
+ size_t fresh_id_ = 0;
+ SourcePosition previous_position_;
+ std::map<DefinitionLocation, std::string> location_map_;
+
+ std::string DefinitionToVariable(const DefinitionLocation& location) {
+ if (location.IsPhi()) {
+ std::stringstream stream;
+ stream << "phi_bb" << location.GetPhiBlock()->id() << "_"
+ << location.GetPhiIndex();
+ return stream.str();
+ } else if (location.IsParameter()) {
+ auto it = location_map_.find(location);
+ DCHECK_NE(it, location_map_.end());
+ return it->second;
+ } else {
+ DCHECK(location.IsInstruction());
+ auto it = location_map_.find(location);
+ if (it == location_map_.end()) {
+ it = location_map_.insert(std::make_pair(location, FreshNodeName()))
+ .first;
+ }
+ return it->second;
+ }
+ }
+
+ void SetDefinitionVariable(const DefinitionLocation& definition,
+ const std::string& str) {
+ DCHECK_EQ(location_map_.find(definition), location_map_.end());
+ location_map_.insert(std::make_pair(definition, str));
+ }
+
+ std::ostream& out() { return *out_; }
+ std::ostream& decls() { return *out_decls_; }
+
+ static bool IsEmptyInstruction(const Instruction& instruction);
+ virtual void EmitSourcePosition(SourcePosition pos,
+ bool always_emit = false) = 0;
+
+ std::string FreshNodeName() { return "tmp" + std::to_string(fresh_id_++); }
+ std::string FreshCatchName() { return "catch" + std::to_string(fresh_id_++); }
+ std::string FreshLabelName() { return "label" + std::to_string(fresh_id_++); }
+ std::string BlockName(const Block* block) {
+ return "block" + std::to_string(block->id());
+ }
+
+ void EmitInstruction(const Instruction& instruction,
+ Stack<std::string>* stack);
+
+#define EMIT_INSTRUCTION_DECLARATION(T) \
+ void EmitInstruction(const T& instruction, Stack<std::string>* stack);
+ TORQUE_BACKEND_AGNOSTIC_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
+#undef EMIT_INSTRUCTION_DECLARATION
+
+#define EMIT_INSTRUCTION_DECLARATION(T) \
+ virtual void EmitInstruction(const T& instruction, \
+ Stack<std::string>* stack) = 0;
+ TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
+#undef EMIT_INSTRUCTION_DECLARATION
+};
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_TORQUE_CODE_GENERATOR_H_
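[Editor's sketch] The intent of the two EMIT_INSTRUCTION_DECLARATION expansions above is that the base class emits backend-agnostic instructions itself while each backend (the existing CSAGenerator and the new CCGenerator) overrides the rest. A stand-alone sketch of that shape, with toy instruction types rather than the real ones:

#include <iostream>

struct PeekInstruction { int slot; };     // backend-agnostic in this sketch
struct BranchInstruction { int block; };  // backend-dependent in this sketch

class CodeGeneratorBase {
 public:
  virtual ~CodeGeneratorBase() = default;
  // Shared emission: implemented once, non-virtual.
  void Emit(const PeekInstruction& i) { std::cout << "peek " << i.slot << "\n"; }
  // Backend-specific emission: every generator must provide its own.
  virtual void Emit(const BranchInstruction& i) = 0;
};

class CCLikeGenerator : public CodeGeneratorBase {
 public:
  using CodeGeneratorBase::Emit;  // keep the shared overloads visible
  void Emit(const BranchInstruction& i) override {
    std::cout << "goto block" << i.block << ";\n";  // C++-backend flavour
  }
};

int main() {
  CCLikeGenerator gen;
  gen.Emit(PeekInstruction{0});
  gen.Emit(BranchInstruction{3});
}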
diff --git a/deps/v8/src/torque/torque-compiler.cc b/deps/v8/src/torque/torque-compiler.cc
index 20bc297354..9e00412ca1 100644
--- a/deps/v8/src/torque/torque-compiler.cc
+++ b/deps/v8/src/torque/torque-compiler.cc
@@ -75,7 +75,8 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
implementation_visitor.SetDryRun(output_directory.length() == 0);
implementation_visitor.GenerateInstanceTypes(output_directory);
- implementation_visitor.BeginCSAFiles();
+ implementation_visitor.BeginGeneratedFiles();
+ implementation_visitor.BeginRuntimeMacrosFile();
implementation_visitor.VisitAllDeclarables();
@@ -94,7 +95,8 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
implementation_visitor.GenerateExportedMacrosAssembler(output_directory);
implementation_visitor.GenerateCSATypes(output_directory);
- implementation_visitor.EndCSAFiles();
+ implementation_visitor.EndGeneratedFiles();
+ implementation_visitor.EndRuntimeMacrosFile();
implementation_visitor.GenerateImplementation(output_directory);
if (GlobalContext::collect_language_server_data()) {
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 51778161d9..b3ff1538b2 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -580,18 +580,23 @@ base::Optional<ParseResult> MakeIntrinsicDeclaration(
}
namespace {
-bool HasExportAnnotation(ParseResultIterator* child_results,
- const char* declaration) {
+bool HasAnnotation(ParseResultIterator* child_results, const char* annotation,
+ const char* declaration) {
auto annotations = child_results->NextAs<std::vector<Annotation>>();
if (annotations.size()) {
- if (annotations.size() > 1 || annotations[0].name->value != "@export") {
- Error(declaration,
- " declarations only support a single @export annotation");
+ if (annotations.size() > 1 || annotations[0].name->value != annotation) {
+ Error(declaration, " declarations only support a single ", annotation,
+ " annotation");
}
return true;
}
return false;
}
+
+bool HasExportAnnotation(ParseResultIterator* child_results,
+ const char* declaration) {
+ return HasAnnotation(child_results, ANNOTATION_EXPORT, declaration);
+}
} // namespace
base::Optional<ParseResult> MakeTorqueMacroDeclaration(
@@ -685,6 +690,8 @@ base::Optional<ParseResult> MakeTypeAliasDeclaration(
base::Optional<ParseResult> MakeAbstractTypeDeclaration(
ParseResultIterator* child_results) {
+ bool use_parent_type_checker = HasAnnotation(
+ child_results, ANNOTATION_USE_PARENT_TYPE_CHECKER, "abstract type");
auto transient = child_results->NextAs<bool>();
auto name = child_results->NextAs<Identifier*>();
if (!IsValidTypeName(name->value)) {
@@ -693,8 +700,11 @@ base::Optional<ParseResult> MakeAbstractTypeDeclaration(
auto generic_parameters = child_results->NextAs<GenericParameters>();
auto extends = child_results->NextAs<base::Optional<TypeExpression*>>();
auto generates = child_results->NextAs<base::Optional<std::string>>();
+ AbstractTypeFlags flags(AbstractTypeFlag::kNone);
+ if (transient) flags |= AbstractTypeFlag::kTransient;
+ if (use_parent_type_checker) flags |= AbstractTypeFlag::kUseParentTypeChecker;
TypeDeclaration* type_decl = MakeNode<AbstractTypeDeclaration>(
- name, transient, extends, std::move(generates));
+ name, flags, extends, std::move(generates));
Declaration* decl = type_decl;
if (!generic_parameters.empty()) {
decl = MakeNode<GenericTypeDeclaration>(generic_parameters, type_decl);
@@ -715,7 +725,8 @@ base::Optional<ParseResult> MakeAbstractTypeDeclaration(
constexpr_extends = AddConstexpr(*extends);
}
TypeDeclaration* constexpr_decl = MakeNode<AbstractTypeDeclaration>(
- constexpr_name, transient, constexpr_extends, constexpr_generates);
+ constexpr_name, flags | AbstractTypeFlag::kConstexpr, constexpr_extends,
+ constexpr_generates);
constexpr_decl->pos = name->pos;
Declaration* decl = constexpr_decl;
if (!generic_parameters.empty()) {
@@ -878,8 +889,9 @@ base::Optional<ParseResult> MakeClassDeclaration(
child_results,
{ANNOTATION_GENERATE_PRINT, ANNOTATION_NO_VERIFIER, ANNOTATION_ABSTRACT,
ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT,
- ANNOTATION_GENERATE_CPP_CLASS, ANNOTATION_GENERATE_BODY_DESCRIPTOR,
- ANNOTATION_EXPORT_CPP_CLASS, ANNOTATION_DO_NOT_GENERATE_CAST,
+ ANNOTATION_GENERATE_CPP_CLASS, ANNOTATION_CUSTOM_CPP_CLASS,
+ ANNOTATION_CUSTOM_MAP, ANNOTATION_GENERATE_BODY_DESCRIPTOR,
+ ANNOTATION_EXPORT, ANNOTATION_DO_NOT_GENERATE_CAST,
ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT,
ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT},
{ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE,
@@ -898,13 +910,19 @@ base::Optional<ParseResult> MakeClassDeclaration(
if (annotations.Contains(ANNOTATION_GENERATE_CPP_CLASS)) {
flags |= ClassFlag::kGenerateCppClassDefinitions;
}
+ if (annotations.Contains(ANNOTATION_CUSTOM_CPP_CLASS)) {
+ flags |= ClassFlag::kCustomCppClass;
+ }
+ if (annotations.Contains(ANNOTATION_CUSTOM_MAP)) {
+ flags |= ClassFlag::kCustomMap;
+ }
if (annotations.Contains(ANNOTATION_DO_NOT_GENERATE_CAST)) {
flags |= ClassFlag::kDoNotGenerateCast;
}
if (annotations.Contains(ANNOTATION_GENERATE_BODY_DESCRIPTOR)) {
flags |= ClassFlag::kGenerateBodyDescriptor;
}
- if (annotations.Contains(ANNOTATION_EXPORT_CPP_CLASS)) {
+ if (annotations.Contains(ANNOTATION_EXPORT)) {
flags |= ClassFlag::kExport;
}
if (annotations.Contains(ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT)) {
@@ -972,8 +990,10 @@ base::Optional<ParseResult> MakeClassDeclaration(
MakeNode<Identifier>(CONSTEXPR_TYPE_PREFIX + name->value);
constexpr_name->pos = name->pos;
TypeExpression* constexpr_extends = AddConstexpr(extends);
+ AbstractTypeFlags abstract_type_flags(AbstractTypeFlag::kConstexpr);
+ if (transient) abstract_type_flags |= AbstractTypeFlag::kTransient;
TypeDeclaration* constexpr_decl = MakeNode<AbstractTypeDeclaration>(
- constexpr_name, transient, constexpr_extends, name->value);
+ constexpr_name, abstract_type_flags, constexpr_extends, name->value);
constexpr_decl->pos = name->pos;
result.push_back(constexpr_decl);
@@ -1280,7 +1300,8 @@ base::Optional<ParseResult> MakeEnumDeclaration(
// type kEntryN extends Enum;
// }
auto type_decl = MakeNode<AbstractTypeDeclaration>(
- name_identifier, false, base_type_expression, base::nullopt);
+ name_identifier, AbstractTypeFlag::kNone, base_type_expression,
+ base::nullopt);
TypeExpression* name_type_expression =
MakeNode<BasicTypeExpression>(name_identifier->value);
@@ -1289,8 +1310,8 @@ base::Optional<ParseResult> MakeEnumDeclaration(
std::vector<Declaration*> entry_decls;
for (const auto& entry : entries) {
entry_decls.push_back(MakeNode<AbstractTypeDeclaration>(
- entry.name, false, entry.type.value_or(name_type_expression),
- base::nullopt));
+ entry.name, AbstractTypeFlag::kNone,
+ entry.type.value_or(name_type_expression), base::nullopt));
}
result.push_back(type_decl);
@@ -1309,8 +1330,8 @@ base::Optional<ParseResult> MakeEnumDeclaration(
std::vector<Declaration*> entry_decls;
for (const auto& entry : entries) {
entry_decls.push_back(MakeNode<AbstractTypeDeclaration>(
- entry.name, false, entry.type.value_or(*base_type_expression),
- base::nullopt));
+ entry.name, AbstractTypeFlag::kNone,
+ entry.type.value_or(*base_type_expression), base::nullopt));
auto entry_type = MakeNode<BasicTypeExpression>(
std::vector<std::string>{name}, entry.name->value,
@@ -1348,8 +1369,8 @@ base::Optional<ParseResult> MakeEnumDeclaration(
base_constexpr_type_expression = AddConstexpr(*base_type_expression);
}
result.push_back(MakeNode<AbstractTypeDeclaration>(
- constexpr_type_identifier, false, base_constexpr_type_expression,
- constexpr_generates));
+ constexpr_type_identifier, AbstractTypeFlag::kConstexpr,
+ base_constexpr_type_expression, constexpr_generates));
TypeExpression* type_expr = nullptr;
Identifier* fromconstexpr_identifier = nullptr;
@@ -1386,8 +1407,9 @@ base::Optional<ParseResult> MakeEnumDeclaration(
"::" + entry_name);
entry_decls.push_back(MakeNode<AbstractTypeDeclaration>(
- MakeNode<Identifier>(entry_constexpr_type), false,
- constexpr_type_expression, constexpr_generates));
+ MakeNode<Identifier>(entry_constexpr_type),
+ AbstractTypeFlag::kConstexpr, constexpr_type_expression,
+ constexpr_generates));
bool generate_typed_constant = entry.type.has_value();
if (generate_typed_constant) {
@@ -2535,7 +2557,7 @@ struct TorqueGrammar : Grammar {
Token("{"), List<BitFieldDeclaration>(&bitFieldDeclaration),
Token("}")},
AsSingletonVector<Declaration*, MakeBitFieldStructDeclaration>()),
- Rule({CheckIf(Token("transient")), Token("type"), &name,
+ Rule({annotations, CheckIf(Token("transient")), Token("type"), &name,
TryOrDefault<GenericParameters>(&genericParameters),
Optional<TypeExpression*>(Sequence({Token("extends"), &type})),
Optional<std::string>(
diff --git a/deps/v8/src/torque/type-visitor.cc b/deps/v8/src/torque/type-visitor.cc
index 3b37593fc6..a706fc561d 100644
--- a/deps/v8/src/torque/type-visitor.cc
+++ b/deps/v8/src/torque/type-visitor.cc
@@ -77,7 +77,7 @@ std::string ComputeGeneratesType(base::Optional<std::string> opt_gen,
const AbstractType* TypeVisitor::ComputeType(
AbstractTypeDeclaration* decl, MaybeSpecializationKey specialized_from) {
std::string generates =
- ComputeGeneratesType(decl->generates, !decl->is_constexpr);
+ ComputeGeneratesType(decl->generates, !decl->IsConstexpr());
const Type* parent_type = nullptr;
if (decl->extends) {
@@ -90,25 +90,21 @@ const AbstractType* TypeVisitor::ComputeType(
}
}
- if (decl->is_constexpr && decl->transient) {
+ if (decl->IsConstexpr() && decl->IsTransient()) {
ReportError("cannot declare a transient type that is also constexpr");
}
const Type* non_constexpr_version = nullptr;
- if (decl->is_constexpr) {
+ if (decl->IsConstexpr()) {
QualifiedName non_constexpr_name{GetNonConstexprName(decl->name->value)};
if (auto type = Declarations::TryLookupType(non_constexpr_name)) {
non_constexpr_version = *type;
}
}
- AbstractTypeFlags flags = AbstractTypeFlag::kNone;
- if (decl->transient) flags |= AbstractTypeFlag::kTransient;
- if (decl->is_constexpr) flags |= AbstractTypeFlag::kConstexpr;
-
- return TypeOracle::GetAbstractType(parent_type, decl->name->value, flags,
- generates, non_constexpr_version,
- specialized_from);
+ return TypeOracle::GetAbstractType(parent_type, decl->name->value,
+ decl->flags, generates,
+ non_constexpr_version, specialized_from);
}
void DeclareMethods(AggregateType* container_type,
@@ -291,6 +287,15 @@ const ClassType* TypeVisitor::ComputeType(
Error("Class \"", decl->name->value,
"\" requires a layout but doesn't have one");
}
+ if (flags & ClassFlag::kCustomCppClass) {
+ if (!(flags & ClassFlag::kExport)) {
+ Error("Only exported classes can have a custom C++ class.");
+ }
+ if (flags & ClassFlag::kExtern) {
+ Error("No need to specify ", ANNOTATION_CUSTOM_CPP_CLASS,
+ ", extern classes always have a custom C++ class.");
+ }
+ }
if (flags & ClassFlag::kExtern) {
if (decl->generates) {
bool enforce_tnode_type = true;
@@ -354,14 +359,17 @@ const Type* TypeVisitor::ComputeType(TypeExpression* type_expression) {
UnionTypeExpression::DynamicCast(type_expression)) {
return TypeOracle::GetUnionType(ComputeType(union_type->a),
ComputeType(union_type->b));
- } else {
- auto* function_type_exp = FunctionTypeExpression::cast(type_expression);
+ } else if (auto* function_type_exp =
+ FunctionTypeExpression::DynamicCast(type_expression)) {
TypeVector argument_types;
for (TypeExpression* type_exp : function_type_exp->parameters) {
argument_types.push_back(ComputeType(type_exp));
}
return TypeOracle::GetBuiltinPointerType(
argument_types, ComputeType(function_type_exp->return_type));
+ } else {
+ auto* precomputed = PrecomputedTypeExpression::cast(type_expression);
+ return precomputed->type;
}
}
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index df35c46300..70dc0fb9fe 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -12,6 +12,7 @@
#include "src/torque/ast.h"
#include "src/torque/declarable.h"
#include "src/torque/global-context.h"
+#include "src/torque/source-positions.h"
#include "src/torque/type-oracle.h"
#include "src/torque/type-visitor.h"
@@ -74,12 +75,18 @@ std::string Type::SimpleName() const {
std::string Type::HandlifiedCppTypeName() const {
if (IsSubtypeOf(TypeOracle::GetSmiType())) return "int";
if (IsSubtypeOf(TypeOracle::GetTaggedType())) {
- return "Handle<" + ConstexprVersion()->GetGeneratedTypeName() + ">";
+ return "Handle<" + UnhandlifiedCppTypeName() + ">";
} else {
- return ConstexprVersion()->GetGeneratedTypeName();
+ return UnhandlifiedCppTypeName();
}
}
+std::string Type::UnhandlifiedCppTypeName() const {
+ if (IsSubtypeOf(TypeOracle::GetSmiType())) return "int";
+ if (this == TypeOracle::GetObjectType()) return "Object";
+ return GetConstexprGeneratedTypeName();
+}
+
bool Type::IsSubtypeOf(const Type* supertype) const {
if (supertype->IsTopType()) return true;
if (IsNever()) return true;
@@ -173,13 +180,14 @@ std::string AbstractType::GetGeneratedTNodeTypeNameImpl() const {
return generated_type_;
}
-std::vector<RuntimeType> AbstractType::GetRuntimeTypes() const {
- std::string type_name = GetGeneratedTNodeTypeName();
+std::vector<TypeChecker> AbstractType::GetTypeCheckers() const {
+ if (UseParentTypeChecker()) return parent()->GetTypeCheckers();
+ std::string type_name = name();
if (auto strong_type =
Type::MatchUnaryGeneric(this, TypeOracle::GetWeakGeneric())) {
- auto strong_runtime_types = (*strong_type)->GetRuntimeTypes();
- std::vector<RuntimeType> result;
- for (const RuntimeType& type : strong_runtime_types) {
+ auto strong_runtime_types = (*strong_type)->GetTypeCheckers();
+ std::vector<TypeChecker> result;
+ for (const TypeChecker& type : strong_runtime_types) {
// Generic parameter in Weak<T> should have already been checked to
// extend HeapObject, so it couldn't itself be another weak type.
DCHECK(type.weak_ref_to.empty());
@@ -643,29 +651,79 @@ bool ClassType::HasNoPointerSlots() const {
return true;
}
+bool ClassType::HasIndexedFieldsIncludingInParents() const {
+ for (const auto& field : fields_) {
+ if (field.index.has_value()) return true;
+ }
+ if (const ClassType* parent = GetSuperClass()) {
+ return parent->HasIndexedFieldsIncludingInParents();
+ }
+ return false;
+}
+
+const Field* ClassType::GetFieldPreceding(size_t field_index) const {
+ if (field_index > 0) {
+ return &fields_[field_index - 1];
+ }
+ if (const ClassType* parent = GetSuperClass()) {
+ return parent->GetFieldPreceding(parent->fields_.size());
+ }
+ return nullptr;
+}
+
+const ClassType* ClassType::GetClassDeclaringField(const Field& f) const {
+ for (const Field& field : fields_) {
+ if (f.name_and_type.name == field.name_and_type.name) return this;
+ }
+ return GetSuperClass()->GetClassDeclaringField(f);
+}
+
+std::string ClassType::GetSliceMacroName(const Field& field) const {
+ const ClassType* declarer = GetClassDeclaringField(field);
+ return "FieldSlice" + declarer->name() +
+ CamelifyString(field.name_and_type.name);
+}
+
void ClassType::GenerateAccessors() {
+ bool at_or_after_indexed_field = false;
+ if (const ClassType* parent = GetSuperClass()) {
+ at_or_after_indexed_field = parent->HasIndexedFieldsIncludingInParents();
+ }
// For each field, construct AST snippets that implement a CSA accessor
// function. The implementation iterator will turn the snippets into code.
- for (auto& field : fields_) {
+ for (size_t field_index = 0; field_index < fields_.size(); ++field_index) {
+ Field& field = fields_[field_index];
if (field.name_and_type.type == TypeOracle::GetVoidType()) {
continue;
}
+ at_or_after_indexed_field =
+ at_or_after_indexed_field || field.index.has_value();
CurrentSourcePosition::Scope position_activator(field.pos);
- IdentifierExpression* parameter =
- MakeNode<IdentifierExpression>(MakeNode<Identifier>(std::string{"o"}));
- IdentifierExpression* index =
- MakeNode<IdentifierExpression>(MakeNode<Identifier>(std::string{"i"}));
+ IdentifierExpression* parameter = MakeIdentifierExpression("o");
+ IdentifierExpression* index = MakeIdentifierExpression("i");
- // Load accessor
std::string camel_field_name = CamelifyString(field.name_and_type.name);
- std::string load_macro_name = "Load" + this->name() + camel_field_name;
+
+ if (at_or_after_indexed_field) {
+ if (!field.index.has_value()) {
+ // There's no fundamental reason we couldn't generate functions to get
+ // references instead of slices, but it's not yet implemented.
+ ReportError(
+ "Torque doesn't yet support non-indexed fields after indexed "
+ "fields");
+ }
+
+ GenerateSliceAccessor(field_index);
+ }
// For now, only generate indexed accessors for simple types
if (field.index.has_value() && field.name_and_type.type->IsStructType()) {
continue;
}
+ // Load accessor
+ std::string load_macro_name = "Load" + this->name() + camel_field_name;
Signature load_signature;
load_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
load_signature.parameter_types.types.push_back(this);
@@ -677,8 +735,8 @@ void ClassType::GenerateAccessors() {
load_signature.parameter_types.var_args = false;
load_signature.return_type = field.name_and_type.type;
- Expression* load_expression = MakeNode<FieldAccessExpression>(
- parameter, MakeNode<Identifier>(field.name_and_type.name));
+ Expression* load_expression =
+ MakeFieldAccessExpression(parameter, field.name_and_type.name);
if (field.index) {
load_expression =
MakeNode<ElementAccessExpression>(load_expression, index);
@@ -689,8 +747,7 @@ void ClassType::GenerateAccessors() {
// Store accessor
if (!field.const_qualified) {
- IdentifierExpression* value = MakeNode<IdentifierExpression>(
- std::vector<std::string>{}, MakeNode<Identifier>(std::string{"v"}));
+ IdentifierExpression* value = MakeIdentifierExpression("v");
std::string store_macro_name = "Store" + this->name() + camel_field_name;
Signature store_signature;
store_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
@@ -705,8 +762,8 @@ void ClassType::GenerateAccessors() {
store_signature.parameter_types.var_args = false;
// TODO(danno): Store macros probably should return their value argument
store_signature.return_type = TypeOracle::GetVoidType();
- Expression* store_expression = MakeNode<FieldAccessExpression>(
- parameter, MakeNode<Identifier>(field.name_and_type.name));
+ Expression* store_expression =
+ MakeFieldAccessExpression(parameter, field.name_and_type.name);
if (field.index) {
store_expression =
MakeNode<ElementAccessExpression>(store_expression, index);
@@ -720,6 +777,131 @@ void ClassType::GenerateAccessors() {
}
}
+void ClassType::GenerateSliceAccessor(size_t field_index) {
+ // Generate a Torque macro for getting a Slice to this field. This macro can
+ // be called by the dot operator for this field. In Torque, this function for
+ // class "ClassName" and field "field_name" and field type "FieldType" would
+ // be written as one of the following:
+ //
+ // If the field has a known offset (in this example, 16):
+ // FieldSliceClassNameFieldName(o: ClassName) {
+ // return torque_internal::Slice<FieldType> {
+ // object: o,
+ // offset: 16,
+ // length: torque_internal::%IndexedFieldLength<ClassName>(
+  //           o, "field_name"),
+ // unsafeMarker: torque_internal::Unsafe {}
+ // };
+ // }
+ //
+ // If the field has an unknown offset, and the previous field is named p, and
+ // an item in the previous field has size 4:
+ // FieldSliceClassNameFieldName(o: ClassName) {
+ // const previous = &o.p;
+ // return torque_internal::Slice<FieldType> {
+ // object: o,
+ // offset: previous.offset + 4 * previous.length,
+ // length: torque_internal::%IndexedFieldLength<ClassName>(
+  //           o, "field_name"),
+ // unsafeMarker: torque_internal::Unsafe {}
+ // };
+ // }
+ const Field& field = fields_[field_index];
+ std::string macro_name = GetSliceMacroName(field);
+ Signature signature;
+ Identifier* parameter_identifier = MakeNode<Identifier>("o");
+ signature.parameter_names.push_back(parameter_identifier);
+ signature.parameter_types.types.push_back(this);
+ signature.parameter_types.var_args = false;
+ signature.return_type = TypeOracle::GetSliceType(field.name_and_type.type);
+
+ std::vector<Statement*> statements;
+ Expression* offset_expression = nullptr;
+ IdentifierExpression* parameter =
+ MakeNode<IdentifierExpression>(parameter_identifier);
+
+ if (field.offset.has_value()) {
+ offset_expression =
+ MakeNode<NumberLiteralExpression>(static_cast<double>(*field.offset));
+ } else {
+ const Field* previous = GetFieldPreceding(field_index);
+ DCHECK_NOT_NULL(previous);
+
+ // o.p
+ Expression* previous_expression =
+ MakeFieldAccessExpression(parameter, previous->name_and_type.name);
+
+ // &o.p
+ previous_expression = MakeCallExpression("&", {previous_expression});
+
+ // const previous = &o.p;
+ Statement* define_previous =
+ MakeConstDeclarationStatement("previous", previous_expression);
+ statements.push_back(define_previous);
+
+ // 4
+ size_t previous_element_size;
+ std::tie(previous_element_size, std::ignore) =
+ *SizeOf(previous->name_and_type.type);
+ Expression* previous_element_size_expression =
+ MakeNode<NumberLiteralExpression>(
+ static_cast<double>(previous_element_size));
+
+ // previous.length
+ Expression* previous_length_expression = MakeFieldAccessExpression(
+ MakeIdentifierExpression("previous"), "length");
+
+ // previous.offset
+ Expression* previous_offset_expression = MakeFieldAccessExpression(
+ MakeIdentifierExpression("previous"), "offset");
+
+ // 4 * previous.length
+ // In contrast to the code used for allocation, we don't need overflow
+ // checks here because we already know all the offsets fit into memory.
+ offset_expression = MakeCallExpression(
+ "*", {previous_element_size_expression, previous_length_expression});
+
+ // previous.offset + 4 * previous.length
+ offset_expression = MakeCallExpression(
+ "+", {previous_offset_expression, offset_expression});
+ }
+
+ // torque_internal::%IndexedFieldLength<ClassName>(o, "field_name")
+ Expression* length_expression = MakeCallExpression(
+ MakeIdentifierExpression({"torque_internal"}, "%IndexedFieldLength",
+ {MakeNode<PrecomputedTypeExpression>(this)}),
+ {parameter, MakeNode<StringLiteralExpression>(
+ StringLiteralQuote(field.name_and_type.name))});
+
+ // torque_internal::Unsafe {}
+ Expression* unsafe_expression = MakeStructExpression(
+ MakeBasicTypeExpression({"torque_internal"}, "Unsafe"), {});
+
+ // torque_internal::Slice<FieldType> {
+ // object: o,
+ // offset: <<offset_expression>>,
+ // length: torque_internal::%IndexedFieldLength<ClassName>(
+  //       o, "field_name"),
+ // unsafeMarker: torque_internal::Unsafe {}
+ // }
+ Expression* slice_expression = MakeStructExpression(
+ MakeBasicTypeExpression(
+ {"torque_internal"}, "Slice",
+ {MakeNode<PrecomputedTypeExpression>(field.name_and_type.type)}),
+ {{MakeNode<Identifier>("object"), parameter},
+ {MakeNode<Identifier>("offset"), offset_expression},
+ {MakeNode<Identifier>("length"), length_expression},
+ {MakeNode<Identifier>("unsafeMarker"), unsafe_expression}});
+
+ statements.push_back(MakeNode<ReturnStatement>(slice_expression));
+ Statement* block =
+ MakeNode<BlockStatement>(/*deferred=*/false, std::move(statements));
+
+ Macro* macro = Declarations::DeclareMacro(macro_name, true, base::nullopt,
+ signature, block, base::nullopt);
+ GlobalContext::EnsureInCCOutputList(TorqueMacro::cast(macro));
+}
+
bool ClassType::HasStaticSize() const {
// Abstract classes don't have instances directly, so asking this question
// doesn't make sense.
@@ -728,6 +910,15 @@ bool ClassType::HasStaticSize() const {
return size().SingleValue().has_value();
}
+SourceId ClassType::AttributedToFile() const {
+ bool in_test_directory = StringStartsWith(
+ SourceFileMap::PathFromV8Root(GetPosition().source).substr(), "test/");
+ if (!in_test_directory && (IsExtern() || ShouldExport())) {
+ return GetPosition().source;
+ }
+ return SourceFileMap::GetSourceId("src/objects/torque-defined-classes.tq");
+}
+
void PrintSignature(std::ostream& os, const Signature& sig, bool with_names) {
os << "(";
for (size_t i = 0; i < sig.parameter_types.types.size(); ++i) {
@@ -1096,10 +1287,23 @@ base::Optional<NameAndType> ExtractSimpleFieldArraySize(
}
std::string Type::GetRuntimeType() const {
- // TODO(tebbi): Other types are currently unsupported, since there the TNode
- // types and the C++ runtime types disagree.
- DCHECK(this->IsSubtypeOf(TypeOracle::GetTaggedType()));
- return GetGeneratedTNodeTypeName();
+ if (IsSubtypeOf(TypeOracle::GetSmiType())) return "Smi";
+ if (IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ return GetGeneratedTNodeTypeName();
+ }
+ if (base::Optional<const StructType*> struct_type = StructSupertype()) {
+ std::stringstream result;
+ result << "std::tuple<";
+ bool first = true;
+ for (const Type* field_type : LowerType(*struct_type)) {
+ if (!first) result << ", ";
+ first = false;
+ result << field_type->GetRuntimeType();
+ }
+ result << ">";
+ return result.str();
+ }
+ return ConstexprVersion()->GetGeneratedTypeName();
}
} // namespace torque
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index d2e857a261..25c849597d 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -95,7 +95,10 @@ struct SpecializationKey {
using MaybeSpecializationKey = base::Optional<SpecializationKey<GenericType>>;
-struct RuntimeType {
+struct TypeChecker {
+ // The type of the object. This string is not guaranteed to correspond to a
+ // C++ class, but just to a type checker function: for any type "Foo" here,
+ // the function Object::IsFoo must exist.
std::string type;
// If {type} is "MaybeObject", then {weak_ref_to} indicates the corresponding
// strong object type. Otherwise, {weak_ref_to} is empty.
@@ -114,6 +117,7 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
// Used for naming generated code.
virtual std::string SimpleName() const;
+ std::string UnhandlifiedCppTypeName() const;
std::string HandlifiedCppTypeName() const;
const Type* parent() const { return parent_; }
@@ -135,7 +139,7 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
std::string GetConstexprGeneratedTypeName() const;
base::Optional<const ClassType*> ClassSupertype() const;
base::Optional<const StructType*> StructSupertype() const;
- virtual std::vector<RuntimeType> GetRuntimeTypes() const { return {}; }
+ virtual std::vector<TypeChecker> GetTypeCheckers() const { return {}; }
virtual std::string GetRuntimeType() const;
static const Type* CommonSupertype(const Type* a, const Type* b);
void AddAlias(std::string alias) const { aliases_.insert(std::move(alias)); }
@@ -156,6 +160,7 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
virtual const Type* ConstexprVersion() const {
if (constexpr_version_) return constexpr_version_;
if (IsConstexpr()) return this;
+ if (parent()) return parent()->ConstexprVersion();
return nullptr;
}
@@ -279,7 +284,7 @@ class AbstractType final : public Type {
return nullptr;
}
- std::vector<RuntimeType> GetRuntimeTypes() const override;
+ std::vector<TypeChecker> GetTypeCheckers() const override;
size_t AlignmentLog2() const override;
@@ -315,6 +320,10 @@ class AbstractType final : public Type {
return flags_ & AbstractTypeFlag::kTransient;
}
+ bool UseParentTypeChecker() const {
+ return flags_ & AbstractTypeFlag::kUseParentTypeChecker;
+ }
+
AbstractTypeFlags flags_;
const std::string name_;
const std::string generated_type_;
@@ -349,7 +358,7 @@ class V8_EXPORT_PRIVATE BuiltinPointerType final : public Type {
}
size_t function_pointer_type_id() const { return function_pointer_type_id_; }
- std::vector<RuntimeType> GetRuntimeTypes() const override {
+ std::vector<TypeChecker> GetTypeCheckers() const override {
return {{"Smi", ""}};
}
@@ -461,10 +470,10 @@ class V8_EXPORT_PRIVATE UnionType final : public Type {
return union_type ? UnionType(*union_type) : UnionType(t);
}
- std::vector<RuntimeType> GetRuntimeTypes() const override {
- std::vector<RuntimeType> result;
+ std::vector<TypeChecker> GetTypeCheckers() const override {
+ std::vector<TypeChecker> result;
for (const Type* member : types_) {
- std::vector<RuntimeType> sub_result = member->GetRuntimeTypes();
+ std::vector<TypeChecker> sub_result = member->GetTypeCheckers();
result.insert(result.end(), sub_result.begin(), sub_result.end());
}
return result;
@@ -498,8 +507,8 @@ class V8_EXPORT_PRIVATE BitFieldStructType final : public Type {
return parent()->GetGeneratedTNodeTypeName();
}
- std::vector<RuntimeType> GetRuntimeTypes() const override {
- return {{parent()->GetGeneratedTNodeTypeName(), ""}};
+ std::vector<TypeChecker> GetTypeCheckers() const override {
+ return parent()->GetTypeCheckers();
}
void SetConstexprVersion(const Type*) const override { UNREACHABLE(); }
@@ -559,7 +568,7 @@ class AggregateType : public Type {
std::vector<Method*> Methods(const std::string& name) const;
std::vector<const AggregateType*> GetHierarchy() const;
- std::vector<RuntimeType> GetRuntimeTypes() const override {
+ std::vector<TypeChecker> GetTypeCheckers() const override {
return {{name_, ""}};
}
@@ -609,6 +618,8 @@ class StructType final : public AggregateType {
// Classifies a struct as containing tagged data, untagged data, or both.
Classification ClassifyContents() const;
+ SourcePosition GetPosition() const { return decl_->pos; }
+
private:
friend class TypeOracle;
StructType(Namespace* nspace, const StructDeclaration* decl,
@@ -672,6 +683,11 @@ class ClassType final : public AggregateType {
return flags_ & ClassFlag::kGenerateCppClassDefinitions || !IsExtern() ||
ShouldGenerateBodyDescriptor();
}
+ bool ShouldGenerateFullClassDefinition() const {
+ return !IsExtern() && !(flags_ & ClassFlag::kCustomCppClass);
+ }
+  // Classes with multiple or non-standard maps do not get an auto-generated map.
+ bool HasCustomMap() const { return flags_ & ClassFlag::kCustomMap; }
bool ShouldExport() const { return flags_ & ClassFlag::kExport; }
bool IsShape() const { return flags_ & ClassFlag::kIsShape; }
bool HasStaticSize() const;
@@ -703,6 +719,14 @@ class ClassType final : public AggregateType {
std::vector<ObjectSlotKind> ComputeHeaderSlotKinds() const;
base::Optional<ObjectSlotKind> ComputeArraySlotKind() const;
bool HasNoPointerSlots() const;
+ bool HasIndexedFieldsIncludingInParents() const;
+ const Field* GetFieldPreceding(size_t field_index) const;
+
+ // Given that the field exists in this class or a superclass, returns the
+ // specific class that declared the field.
+ const ClassType* GetClassDeclaringField(const Field& f) const;
+
+ std::string GetSliceMacroName(const Field& field) const;
const InstanceTypeConstraints& GetInstanceTypeConstraints() const {
return decl_->instance_type_constraints;
@@ -717,6 +741,7 @@ class ClassType final : public AggregateType {
return flags_ & ClassFlag::kUndefinedLayout;
}
SourcePosition GetPosition() const { return decl_->pos; }
+ SourceId AttributedToFile() const;
// TODO(tebbi): We should no longer pass around types as const pointers, so
// that we can avoid mutable fields and const initializers for
@@ -733,6 +758,8 @@ class ClassType final : public AggregateType {
ClassFlags flags, const std::string& generates,
const ClassDeclaration* decl, const TypeAlias* alias);
+ void GenerateSliceAccessor(size_t field_index);
+
size_t header_size_;
ResidueClass size_;
mutable ClassFlags flags_;
diff --git a/deps/v8/src/tracing/DIR_METADATA b/deps/v8/src/tracing/DIR_METADATA
new file mode 100644
index 0000000000..3ba1106a5f
--- /dev/null
+++ b/deps/v8/src/tracing/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Platform>DevTools>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/tracing/OWNERS b/deps/v8/src/tracing/OWNERS
index 7ab7c063da..6afd4d0fee 100644
--- a/deps/v8/src/tracing/OWNERS
+++ b/deps/v8/src/tracing/OWNERS
@@ -1,4 +1,2 @@
alph@chromium.org
petermarshall@chromium.org
-
-# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/tracing/trace-categories.h b/deps/v8/src/tracing/trace-categories.h
index 2f9d672801..28c66a3101 100644
--- a/deps/v8/src/tracing/trace-categories.h
+++ b/deps/v8/src/tracing/trace-categories.h
@@ -46,6 +46,7 @@ PERFETTO_DEFINE_CATEGORIES(
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.runtime")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.turbofan")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.zone_stats")),
diff --git a/deps/v8/src/trap-handler/DIR_METADATA b/deps/v8/src/trap-handler/DIR_METADATA
new file mode 100644
index 0000000000..3b428d9660
--- /dev/null
+++ b/deps/v8/src/trap-handler/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>WebAssembly"
+}
\ No newline at end of file
diff --git a/deps/v8/src/trap-handler/OWNERS b/deps/v8/src/trap-handler/OWNERS
index f6f3bc07ec..7035a46ab6 100644
--- a/deps/v8/src/trap-handler/OWNERS
+++ b/deps/v8/src/trap-handler/OWNERS
@@ -6,5 +6,3 @@ ahaas@chromium.org
# ahaas@chromium.org
# mseaborn@chromium.org
# mark@chromium.org
-
-# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/trap-handler/handler-outside.cc b/deps/v8/src/trap-handler/handler-outside.cc
index c6ee7b2376..62355a5b60 100644
--- a/deps/v8/src/trap-handler/handler-outside.cc
+++ b/deps/v8/src/trap-handler/handler-outside.cc
@@ -249,9 +249,19 @@ bool RegisterDefaultTrapHandler() { return false; }
void RemoveTrapHandler() {}
#endif
-bool g_is_trap_handler_enabled;
+bool g_is_trap_handler_enabled{false};
+std::atomic<bool> g_can_enable_trap_handler{true};
bool EnableTrapHandler(bool use_v8_handler) {
+ // We should only enable the trap handler once, and before any call to
+ // {IsTrapHandlerEnabled}. Enabling the trap handler late can lead to problems
+ // because code or objects might have been generated under the assumption that
+ // trap handlers are disabled.
+ bool can_enable =
+ g_can_enable_trap_handler.exchange(false, std::memory_order_relaxed);
+ if (!can_enable) {
+ FATAL("EnableTrapHandler called twice, or after IsTrapHandlerEnabled");
+ }
if (!V8_TRAP_HANDLER_SUPPORTED) {
return false;
}
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index f6fdca553e..e75355decd 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -8,6 +8,8 @@
#include <stdint.h>
#include <stdlib.h>
+#include <atomic>
+
#include "src/base/build_config.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
@@ -64,15 +66,32 @@ void V8_EXPORT_PRIVATE ReleaseHandlerData(int index);
#define THREAD_LOCAL __thread
#endif
+// Initially false, set to true when trap handlers are enabled. It never goes
+// back to false afterwards.
extern bool g_is_trap_handler_enabled;
+
+// Initially true, set to false when either {IsTrapHandlerEnabled} or
+// {EnableTrapHandler} is called. This prevents {EnableTrapHandler} from being
+// called repeatedly, or after {IsTrapHandlerEnabled}. Needs to be atomic
+// because {IsTrapHandlerEnabled} can be called from any thread. Updated using
+// relaxed semantics, since it's not used for synchronization.
+extern std::atomic<bool> g_can_enable_trap_handler;
+
// Enables trap handling for WebAssembly bounds checks.
//
// use_v8_handler indicates that V8 should install its own handler
// rather than relying on the embedder to do it.
-bool EnableTrapHandler(bool use_v8_handler);
+V8_EXPORT_PRIVATE bool EnableTrapHandler(bool use_v8_handler);
inline bool IsTrapHandlerEnabled() {
DCHECK_IMPLIES(g_is_trap_handler_enabled, V8_TRAP_HANDLER_SUPPORTED);
+ // Disallow enabling the trap handler after retrieving the current value.
+  // Enabling the trap handler late can produce issues because code or objects
+  // might have been generated assuming trap handlers are disabled.
+  // Note: We test before setting to avoid contention from an unconditional write.
+ if (g_can_enable_trap_handler.load(std::memory_order_relaxed)) {
+ g_can_enable_trap_handler.store(false, std::memory_order_relaxed);
+ }
return g_is_trap_handler_enabled;
}
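The {g_can_enable_trap_handler} flag plus the FATAL() guard enforce a call-order contract: {EnableTrapHandler} may run at most once, and only before the first {IsTrapHandlerEnabled} query. A minimal embedder-side sketch of that contract; the wrapper function and where it is called from are hypothetical, only the trap_handler calls themselves come from this patch:

// Hypothetical embedder-side sketch; assumes the functions live in
// v8::internal::trap_handler as declared in the header above.
#include "src/trap-handler/trap-handler.h"

namespace {

void InitializeWasmTrapHandling() {
  // Must happen exactly once, before any code generation that queries
  // IsTrapHandlerEnabled(). A second call, or a call after the first query,
  // now hits the FATAL() guard added in handler-outside.cc.
  bool enabled = v8::internal::trap_handler::EnableTrapHandler(
      /*use_v8_handler=*/true);
  if (!enabled) {
    // Unsupported platform: Wasm bounds checks fall back to explicit checks.
  }
}

}  // namespace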
diff --git a/deps/v8/src/utils/DIR_METADATA b/deps/v8/src/utils/DIR_METADATA
new file mode 100644
index 0000000000..2f8dbbcf45
--- /dev/null
+++ b/deps/v8/src/utils/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/utils/OWNERS b/deps/v8/src/utils/OWNERS
index 4750620072..48d72aea5e 100644
--- a/deps/v8/src/utils/OWNERS
+++ b/deps/v8/src/utils/OWNERS
@@ -1,3 +1 @@
file:../../COMMON_OWNERS
-
-# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/utils/bit-vector.cc b/deps/v8/src/utils/bit-vector.cc
index 20e645f24c..f90175189b 100644
--- a/deps/v8/src/utils/bit-vector.cc
+++ b/deps/v8/src/utils/bit-vector.cc
@@ -11,7 +11,7 @@ namespace v8 {
namespace internal {
#ifdef DEBUG
-void BitVector::Print() {
+void BitVector::Print() const {
bool first = true;
PrintF("{");
for (int i = 0; i < length(); i++) {
diff --git a/deps/v8/src/utils/bit-vector.h b/deps/v8/src/utils/bit-vector.h
index d68009d723..c171f51160 100644
--- a/deps/v8/src/utils/bit-vector.h
+++ b/deps/v8/src/utils/bit-vector.h
@@ -277,7 +277,7 @@ class V8_EXPORT_PRIVATE BitVector : public ZoneObject {
int length() const { return length_; }
#ifdef DEBUG
- void Print();
+ void Print() const;
#endif
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(BitVector);
diff --git a/deps/v8/src/utils/identity-map.cc b/deps/v8/src/utils/identity-map.cc
index 909c175007..6e22cc783a 100644
--- a/deps/v8/src/utils/identity-map.cc
+++ b/deps/v8/src/utils/identity-map.cc
@@ -26,7 +26,7 @@ void IdentityMapBase::Clear() {
DCHECK(!is_iterable());
DCHECK_NOT_NULL(strong_roots_entry_);
heap_->UnregisterStrongRoots(strong_roots_entry_);
- DeletePointerArray(reinterpret_cast<void**>(keys_), capacity_);
+ DeletePointerArray(reinterpret_cast<uintptr_t*>(keys_), capacity_);
DeletePointerArray(values_, capacity_);
keys_ = nullptr;
strong_roots_entry_ = nullptr;
@@ -47,8 +47,8 @@ void IdentityMapBase::DisableIteration() {
is_iterable_ = false;
}
-int IdentityMapBase::ScanKeysFor(Address address) const {
- int start = Hash(address) & mask_;
+int IdentityMapBase::ScanKeysFor(Address address, uint32_t hash) const {
+ int start = hash & mask_;
Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
for (int index = start; index < capacity_; index++) {
if (keys_[index] == address) return index; // Found.
@@ -61,33 +61,41 @@ int IdentityMapBase::ScanKeysFor(Address address) const {
return -1;
}
-int IdentityMapBase::InsertKey(Address address) {
+std::pair<int, bool> IdentityMapBase::InsertKey(Address address,
+ uint32_t hash) {
+ DCHECK_EQ(gc_counter_, heap_->gc_count());
+
+ // Grow the map if we reached >= 80% occupancy.
+ if (size_ + size_ / 4 >= capacity_) {
+ Resize(capacity_ * kResizeFactor);
+ }
+
Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
+
+ int start = hash & mask_;
+  // Guaranteed to terminate: since size_ < capacity_, there must be at least
+  // one empty slot.
+ int index = start;
while (true) {
- int start = Hash(address) & mask_;
- int limit = capacity_ / 2;
- // Search up to {limit} entries.
- for (int index = start; --limit > 0; index = (index + 1) & mask_) {
- if (keys_[index] == address) return index; // Found.
- if (keys_[index] == not_mapped) { // Free entry.
- size_++;
- DCHECK_LE(size_, capacity_);
- keys_[index] = address;
- return index;
- }
+ if (keys_[index] == address) return {index, true}; // Found.
+ if (keys_[index] == not_mapped) { // Free entry.
+ size_++;
+ DCHECK_LE(size_, capacity_);
+ keys_[index] = address;
+ return {index, false};
}
- // Should only have to resize once, since we grow 4x.
- Resize(capacity_ * kResizeFactor);
+ index = (index + 1) & mask_;
+ // We should never loop back to the start.
+ DCHECK_NE(index, start);
}
- UNREACHABLE();
}
-bool IdentityMapBase::DeleteIndex(int index, void** deleted_value) {
+bool IdentityMapBase::DeleteIndex(int index, uintptr_t* deleted_value) {
if (deleted_value != nullptr) *deleted_value = values_[index];
Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
DCHECK_NE(keys_[index], not_mapped);
keys_[index] = not_mapped;
- values_[index] = nullptr;
+ values_[index] = 0;
size_--;
DCHECK_GE(size_, 0);
@@ -113,7 +121,7 @@ bool IdentityMapBase::DeleteIndex(int index, void** deleted_value) {
}
DCHECK_EQ(not_mapped, keys_[index]);
- DCHECK_NULL(values_[index]);
+ DCHECK_EQ(values_[index], 0);
std::swap(keys_[index], keys_[next_index]);
std::swap(values_[index], values_[next_index]);
index = next_index;
@@ -123,39 +131,69 @@ bool IdentityMapBase::DeleteIndex(int index, void** deleted_value) {
}
int IdentityMapBase::Lookup(Address key) const {
- int index = ScanKeysFor(key);
+ uint32_t hash = Hash(key);
+ int index = ScanKeysFor(key, hash);
if (index < 0 && gc_counter_ != heap_->gc_count()) {
// Miss; rehash if there was a GC, then lookup again.
const_cast<IdentityMapBase*>(this)->Rehash();
- index = ScanKeysFor(key);
+ index = ScanKeysFor(key, hash);
}
return index;
}
-int IdentityMapBase::LookupOrInsert(Address key) {
+std::pair<int, bool> IdentityMapBase::LookupOrInsert(Address key) {
+ uint32_t hash = Hash(key);
// Perform an optimistic lookup.
- int index = ScanKeysFor(key);
+ int index = ScanKeysFor(key, hash);
+ bool already_exists;
if (index < 0) {
// Miss; rehash if there was a GC, then insert.
if (gc_counter_ != heap_->gc_count()) Rehash();
- index = InsertKey(key);
+ std::tie(index, already_exists) = InsertKey(key, hash);
+ } else {
+ already_exists = true;
}
DCHECK_GE(index, 0);
- return index;
+ return {index, already_exists};
}
-int IdentityMapBase::Hash(Address address) const {
+uint32_t IdentityMapBase::Hash(Address address) const {
CHECK_NE(address, ReadOnlyRoots(heap_).not_mapped_symbol().ptr());
- return static_cast<int>(hasher_(address));
+ return static_cast<uint32_t>(hasher_(address));
}
// Searches this map for the given key using the object's address
// as the identity, returning:
-// found => a pointer to the storage location for the value
-// not found => a pointer to a new storage location for the value
-IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Address key) {
+// found => a pointer to the storage location for the value, true
+// not found => a pointer to a new storage location for the value, false
+IdentityMapFindResult<uintptr_t> IdentityMapBase::FindOrInsertEntry(
+ Address key) {
CHECK(!is_iterable()); // Don't allow insertion while iterable.
if (capacity_ == 0) {
+ return {InsertEntry(key), false};
+ }
+ auto lookup_result = LookupOrInsert(key);
+ return {&values_[lookup_result.first], lookup_result.second};
+}
+
+// Searches this map for the given key using the object's address
+// as the identity, returning:
+// found => a pointer to the storage location for the value
+// not found => {nullptr}
+IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Address key) const {
+ // Don't allow find by key while iterable (might rehash).
+ CHECK(!is_iterable());
+ if (size_ == 0) return nullptr;
+ int index = Lookup(key);
+ return index >= 0 ? &values_[index] : nullptr;
+}
+
+// Inserts the given key using the object's address as the identity, returning
+// a pointer to the new storage location for the value.
+IdentityMapBase::RawEntry IdentityMapBase::InsertEntry(Address key) {
+ // Don't allow find by key while iterable (might rehash).
+ CHECK(!is_iterable());
+ if (capacity_ == 0) {
// Allocate the initial storage for keys and values.
capacity_ = kInitialIdentityMapSize;
mask_ = kInitialIdentityMapSize - 1;
@@ -165,32 +203,26 @@ IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Address key) {
Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
for (int i = 0; i < capacity_; i++) keys_[i] = not_mapped;
values_ = NewPointerArray(capacity_);
- memset(values_, 0, sizeof(void*) * capacity_);
+ memset(values_, 0, sizeof(uintptr_t) * capacity_);
strong_roots_entry_ = heap_->RegisterStrongRoots(
FullObjectSlot(keys_), FullObjectSlot(keys_ + capacity_));
+ } else {
+ // Rehash if there was a GC, then insert.
+ if (gc_counter_ != heap_->gc_count()) Rehash();
}
- int index = LookupOrInsert(key);
- return &values_[index];
-}
-// Searches this map for the given key using the object's address
-// as the identity, returning:
-// found => a pointer to the storage location for the value
-// not found => {nullptr}
-IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Address key) const {
- // Don't allow find by key while iterable (might rehash).
- CHECK(!is_iterable());
- if (size_ == 0) return nullptr;
- // Remove constness since lookup might have to rehash.
- int index = Lookup(key);
- return index >= 0 ? &values_[index] : nullptr;
+ int index;
+ bool already_exists;
+ std::tie(index, already_exists) = InsertKey(key, Hash(key));
+ DCHECK(!already_exists);
+ return &values_[index];
}
// Deletes the given key from the map using the object's address as the
// identity, returning true iff the key was found (in which case, the value
// argument will be set to the deleted entry's value).
-bool IdentityMapBase::DeleteEntry(Address key, void** deleted_value) {
+bool IdentityMapBase::DeleteEntry(Address key, uintptr_t* deleted_value) {
CHECK(!is_iterable()); // Don't allow deletion by key while iterable.
if (size_ == 0) return false;
int index = Lookup(key);
@@ -232,7 +264,7 @@ void IdentityMapBase::Rehash() {
// Record the current GC counter.
gc_counter_ = heap_->gc_count();
// Assume that most objects won't be moved.
- std::vector<std::pair<Address, void*>> reinsert;
+ std::vector<std::pair<Address, uintptr_t>> reinsert;
// Search the table looking for keys that wouldn't be found with their
// current hashcode and evacuate them.
int last_empty = -1;
@@ -244,9 +276,9 @@ void IdentityMapBase::Rehash() {
int pos = Hash(keys_[i]) & mask_;
if (pos <= last_empty || pos > i) {
// Evacuate an entry that is in the wrong place.
- reinsert.push_back(std::pair<Address, void*>(keys_[i], values_[i]));
+ reinsert.push_back(std::pair<Address, uintptr_t>(keys_[i], values_[i]));
keys_[i] = not_mapped;
- values_[i] = nullptr;
+ values_[i] = 0;
last_empty = i;
size_--;
}
@@ -254,7 +286,7 @@ void IdentityMapBase::Rehash() {
}
// Reinsert all the key/value pairs that were in the wrong place.
for (auto pair : reinsert) {
- int index = InsertKey(pair.first);
+ int index = InsertKey(pair.first, Hash(pair.first)).first;
DCHECK_GE(index, 0);
values_[index] = pair.second;
}
@@ -266,7 +298,7 @@ void IdentityMapBase::Resize(int new_capacity) {
DCHECK_GT(new_capacity, size_);
int old_capacity = capacity_;
Address* old_keys = keys_;
- void** old_values = values_;
+ uintptr_t* old_values = values_;
capacity_ = new_capacity;
mask_ = capacity_ - 1;
@@ -277,11 +309,11 @@ void IdentityMapBase::Resize(int new_capacity) {
Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
for (int i = 0; i < capacity_; i++) keys_[i] = not_mapped;
values_ = NewPointerArray(capacity_);
- memset(values_, 0, sizeof(void*) * capacity_);
+ memset(values_, 0, sizeof(uintptr_t) * capacity_);
for (int i = 0; i < old_capacity; i++) {
if (old_keys[i] == not_mapped) continue;
- int index = InsertKey(old_keys[i]);
+ int index = InsertKey(old_keys[i], Hash(old_keys[i])).first;
DCHECK_GE(index, 0);
values_[index] = old_values[i];
}
@@ -292,7 +324,7 @@ void IdentityMapBase::Resize(int new_capacity) {
FullObjectSlot(keys_ + capacity_));
// Delete old storage;
- DeletePointerArray(reinterpret_cast<void**>(old_keys), old_capacity);
+ DeletePointerArray(reinterpret_cast<uintptr_t*>(old_keys), old_capacity);
DeletePointerArray(old_values, old_capacity);
}
diff --git a/deps/v8/src/utils/identity-map.h b/deps/v8/src/utils/identity-map.h
index 362a3decfa..20b5f100bf 100644
--- a/deps/v8/src/utils/identity-map.h
+++ b/deps/v8/src/utils/identity-map.h
@@ -5,6 +5,8 @@
#ifndef V8_UTILS_IDENTITY_MAP_H_
#define V8_UTILS_IDENTITY_MAP_H_
+#include <type_traits>
+
#include "src/base/functional.h"
#include "src/handles/handles.h"
#include "src/objects/heap-object.h"
@@ -16,6 +18,12 @@ namespace internal {
class Heap;
class StrongRootsEntry;
+template <typename T>
+struct IdentityMapFindResult {
+ T* entry;
+ bool already_exists;
+};
+
// Base class of identity maps contains shared code for all template
// instantiations.
class V8_EXPORT_PRIVATE IdentityMapBase {
@@ -30,7 +38,7 @@ class V8_EXPORT_PRIVATE IdentityMapBase {
// within the {keys_} array in order to simulate a moving GC.
friend class IdentityMapTester;
- using RawEntry = void**;
+ using RawEntry = uintptr_t*;
explicit IdentityMapBase(Heap* heap)
: heap_(heap),
@@ -44,9 +52,10 @@ class V8_EXPORT_PRIVATE IdentityMapBase {
is_iterable_(false) {}
virtual ~IdentityMapBase();
- RawEntry GetEntry(Address key);
+ IdentityMapFindResult<uintptr_t> FindOrInsertEntry(Address key);
RawEntry FindEntry(Address key) const;
- bool DeleteEntry(Address key, void** deleted_value);
+ RawEntry InsertEntry(Address key);
+ bool DeleteEntry(Address key, uintptr_t* deleted_value);
void Clear();
Address KeyAtIndex(int index) const;
@@ -57,19 +66,19 @@ class V8_EXPORT_PRIVATE IdentityMapBase {
void EnableIteration();
void DisableIteration();
- virtual void** NewPointerArray(size_t length) = 0;
- virtual void DeletePointerArray(void** array, size_t length) = 0;
+ virtual uintptr_t* NewPointerArray(size_t length) = 0;
+ virtual void DeletePointerArray(uintptr_t* array, size_t length) = 0;
private:
// Internal implementation should not be called directly by subclasses.
- int ScanKeysFor(Address address) const;
- int InsertKey(Address address);
+ int ScanKeysFor(Address address, uint32_t hash) const;
+ std::pair<int, bool> InsertKey(Address address, uint32_t hash);
int Lookup(Address key) const;
- int LookupOrInsert(Address key);
- bool DeleteIndex(int index, void** deleted_value);
+ std::pair<int, bool> LookupOrInsert(Address key);
+ bool DeleteIndex(int index, uintptr_t* deleted_value);
void Rehash();
void Resize(int new_capacity);
- int Hash(Address address) const;
+ uint32_t Hash(Address address) const;
base::hash<uintptr_t> hasher_;
Heap* heap_;
@@ -79,7 +88,7 @@ class V8_EXPORT_PRIVATE IdentityMapBase {
int mask_;
Address* keys_;
StrongRootsEntry* strong_roots_entry_;
- void** values_;
+ uintptr_t* values_;
bool is_iterable_;
DISALLOW_COPY_AND_ASSIGN(IdentityMapBase);
@@ -89,11 +98,15 @@ class V8_EXPORT_PRIVATE IdentityMapBase {
// The map is robust w.r.t. garbage collection by synchronization with the
// supplied {heap}.
// * Keys are treated as strong roots.
-// * The value type {V} must be reinterpret_cast'able to {void*}
+// * The value type {V} must be reinterpret_cast'able to {uintptr_t}
// * The value type {V} must not be a heap type.
template <typename V, class AllocationPolicy>
class IdentityMap : public IdentityMapBase {
public:
+ STATIC_ASSERT(sizeof(V) <= sizeof(uintptr_t));
+ STATIC_ASSERT(std::is_trivially_copyable<V>::value);
+ STATIC_ASSERT(std::is_trivially_destructible<V>::value);
+
explicit IdentityMap(Heap* heap,
AllocationPolicy allocator = AllocationPolicy())
: IdentityMapBase(heap), allocator_(allocator) {}
@@ -101,10 +114,15 @@ class IdentityMap : public IdentityMapBase {
// Searches this map for the given key using the object's address
// as the identity, returning:
- // found => a pointer to the storage location for the value
- // not found => a pointer to a new storage location for the value
- V* Get(Handle<Object> key) { return Get(*key); }
- V* Get(Object key) { return reinterpret_cast<V*>(GetEntry(key.ptr())); }
+ // found => a pointer to the storage location for the value, true
+ // not found => a pointer to a new storage location for the value, false
+ IdentityMapFindResult<V> FindOrInsert(Handle<Object> key) {
+ return FindOrInsert(*key);
+ }
+ IdentityMapFindResult<V> FindOrInsert(Object key) {
+ auto raw = FindOrInsertEntry(key.ptr());
+ return {reinterpret_cast<V*>(raw.entry), raw.already_exists};
+ }
// Searches this map for the given key using the object's address
// as the identity, returning:
@@ -115,17 +133,18 @@ class IdentityMap : public IdentityMapBase {
return reinterpret_cast<V*>(FindEntry(key.ptr()));
}
- // Set the value for the given key.
- void Set(Handle<Object> key, V v) { Set(*key, v); }
- void Set(Object key, V v) {
- *(reinterpret_cast<V*>(GetEntry(key.ptr()))) = v;
+ // Insert the value for the given key. The key must not have previously
+ // existed.
+ void Insert(Handle<Object> key, V v) { Insert(*key, v); }
+ void Insert(Object key, V v) {
+ *reinterpret_cast<V*>(InsertEntry(key.ptr())) = v;
}
bool Delete(Handle<Object> key, V* deleted_value) {
return Delete(*key, deleted_value);
}
bool Delete(Object key, V* deleted_value) {
- void* v = nullptr;
+ uintptr_t v;
bool deleted_something = DeleteEntry(key.ptr(), &v);
if (deleted_value != nullptr && deleted_something) {
*deleted_value = *reinterpret_cast<V*>(&v);
@@ -188,12 +207,12 @@ class IdentityMap : public IdentityMapBase {
// TODO(ishell): consider removing virtual methods in favor of combining
// IdentityMapBase and IdentityMap into one class. This would also save
- // space when sizeof(V) is less than sizeof(void*).
- void** NewPointerArray(size_t length) override {
- return allocator_.template NewArray<void*, Buffer>(length);
+ // space when sizeof(V) is less than sizeof(uintptr_t).
+ uintptr_t* NewPointerArray(size_t length) override {
+ return allocator_.template NewArray<uintptr_t, Buffer>(length);
}
- void DeletePointerArray(void** array, size_t length) override {
- allocator_.template DeleteArray<void*, Buffer>(array, length);
+ void DeletePointerArray(uintptr_t* array, size_t length) override {
+ allocator_.template DeleteArray<uintptr_t, Buffer>(array, length);
}
private:
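Replacing {GetEntry}/{Set} with {FindOrInsert}, {Insert}, and {Find} lets callers see whether a key was already present instead of silently overwriting it. A rough sketch of the new typed API, assuming the usual v8::internal setup (a Heap-backed map, FreeStoreAllocationPolicy, handles); the counting logic is purely illustrative:

// Sketch only (not from the patch): counts how often each key has been seen.
#include "src/utils/identity-map.h"

namespace v8 {
namespace internal {

void CountKey(IdentityMap<int, FreeStoreAllocationPolicy>* map,
              Handle<Object> key) {
  auto result = map->FindOrInsert(key);
  if (result.already_exists) {
    *result.entry += 1;  // key was present: bump the existing count
  } else {
    *result.entry = 1;   // fresh slot: initialize it
  }

  // Find() never allocates a slot and returns nullptr for unknown keys;
  // Insert() now requires (and DCHECKs) that the key is genuinely new.
  DCHECK_NOT_NULL(map->Find(key));
}

}  // namespace internal
}  // namespace v8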
diff --git a/deps/v8/src/utils/locked-queue-inl.h b/deps/v8/src/utils/locked-queue-inl.h
index 9416dd7d37..edcdf03a5d 100644
--- a/deps/v8/src/utils/locked-queue-inl.h
+++ b/deps/v8/src/utils/locked-queue-inl.h
@@ -38,10 +38,10 @@ inline LockedQueue<Record>::~LockedQueue() {
}
template <typename Record>
-inline void LockedQueue<Record>::Enqueue(const Record& record) {
+inline void LockedQueue<Record>::Enqueue(Record record) {
Node* n = new Node();
CHECK_NOT_NULL(n);
- n->value = record;
+ n->value = std::move(record);
{
base::MutexGuard guard(&tail_mutex_);
tail_->next.SetValue(n);
@@ -57,7 +57,7 @@ inline bool LockedQueue<Record>::Dequeue(Record* record) {
old_head = head_;
Node* const next_node = head_->next.Value();
if (next_node == nullptr) return false;
- *record = next_node->value;
+ *record = std::move(next_node->value);
head_ = next_node;
}
delete old_head;
diff --git a/deps/v8/src/utils/locked-queue.h b/deps/v8/src/utils/locked-queue.h
index 4dd6488184..7594cc93c3 100644
--- a/deps/v8/src/utils/locked-queue.h
+++ b/deps/v8/src/utils/locked-queue.h
@@ -21,7 +21,7 @@ class LockedQueue final {
public:
inline LockedQueue();
inline ~LockedQueue();
- inline void Enqueue(const Record& record);
+ inline void Enqueue(Record record);
inline bool Dequeue(Record* record);
inline bool IsEmpty() const;
inline bool Peek(Record* record) const;
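Taking {Record} by value in {Enqueue} and moving it into (and out of) the queue nodes makes {LockedQueue} usable with move-only payloads. A hedged sketch under that assumption; the CompilationJob payload is made up for illustration:

// Illustrative only; assumes inclusion of src/utils/locked-queue-inl.h for the
// template definitions.
#include <memory>
#include "src/utils/locked-queue-inl.h"

struct CompilationJob {
  int function_index = 0;
};

void Example(v8::internal::LockedQueue<std::unique_ptr<CompilationJob>>* queue) {
  auto job = std::make_unique<CompilationJob>();
  job->function_index = 42;
  queue->Enqueue(std::move(job));  // moved into the queue node, no copy

  std::unique_ptr<CompilationJob> dequeued;
  if (queue->Dequeue(&dequeued)) {
    // dequeued now owns the job; the node's slot was moved-from.
  }
}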
diff --git a/deps/v8/src/utils/utils.h b/deps/v8/src/utils/utils.h
index 7ec0dd2c00..af8f34030f 100644
--- a/deps/v8/src/utils/utils.h
+++ b/deps/v8/src/utils/utils.h
@@ -69,13 +69,13 @@ static T ArithmeticShiftRight(T x, int shift) {
// Returns the maximum of the two parameters.
template <typename T>
constexpr T Max(T a, T b) {
- return a < b ? b : a;
+ return std::max(a, b);
}
// Returns the minimum of the two parameters.
template <typename T>
constexpr T Min(T a, T b) {
- return a < b ? a : b;
+ return std::min(a, b);
}
// Returns the maximum of the two parameters according to JavaScript semantics.
@@ -135,6 +135,15 @@ inline double Modulo(double x, double y) {
}
template <typename T>
+T Saturate(int64_t value) {
+ static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
+ int64_t min = static_cast<int64_t>(std::numeric_limits<T>::min());
+ int64_t max = static_cast<int64_t>(std::numeric_limits<T>::max());
+ int64_t clamped = std::max(min, std::min(max, value));
+ return static_cast<T>(clamped);
+}
+
+template <typename T>
T SaturateAdd(T a, T b) {
if (std::is_signed<T>::value) {
if (a > 0 && b > 0) {
@@ -176,6 +185,53 @@ T SaturateSub(T a, T b) {
return a - b;
}
+template <typename T>
+T SaturateRoundingQMul(T a, T b) {
+ // Saturating rounding multiplication for Q-format numbers. See
+ // https://en.wikipedia.org/wiki/Q_(number_format) for a description.
+ // Specifically this supports Q7, Q15, and Q31. This follows the
+ // implementation in simulator-logic-arm64.cc (sqrdmulh) to avoid overflow
+ // when a == b == int32 min.
+ static_assert(std::is_integral<T>::value, "only integral types");
+
+ constexpr int size_in_bits = sizeof(T) * 8;
+ int round_const = 1 << (size_in_bits - 2);
+ int64_t product = a * b;
+ product += round_const;
+ product >>= (size_in_bits - 1);
+ return Saturate<T>(product);
+}
+
+// Multiply two numbers, returning a result that is twice as wide, no overflow.
+// Put Wide first so we can use function template argument deduction for Narrow,
+// and callers can provide only Wide.
+template <typename Wide, typename Narrow>
+Wide MultiplyLong(Narrow a, Narrow b) {
+ static_assert(
+ std::is_integral<Narrow>::value && std::is_integral<Wide>::value,
+ "only integral types");
+ static_assert(std::is_signed<Narrow>::value == std::is_signed<Wide>::value,
+ "both must have same signedness");
+ static_assert(sizeof(Narrow) * 2 == sizeof(Wide), "only twice as long");
+
+ return static_cast<Wide>(a) * static_cast<Wide>(b);
+}
+
+// Add two numbers, returning a result that is twice as wide, no overflow.
+// Put Wide first so we can use function template argument deduction for Narrow,
+// and callers can provide only Wide.
+template <typename Wide, typename Narrow>
+Wide AddLong(Narrow a, Narrow b) {
+ static_assert(
+ std::is_integral<Narrow>::value && std::is_integral<Wide>::value,
+ "only integral types");
+ static_assert(std::is_signed<Narrow>::value == std::is_signed<Wide>::value,
+ "both must have same signedness");
+ static_assert(sizeof(Narrow) * 2 == sizeof(Wide), "only twice as long");
+
+ return static_cast<Wide>(a) + static_cast<Wide>(b);
+}
+
// Helper macros for defining a contiguous sequence of field offset constants.
// Example: (backslashes at the ends of respective lines of this multi-line
// macro definition are omitted here to please the compiler)
@@ -682,6 +738,19 @@ static inline V ByteReverse(V value) {
}
}
+#if V8_OS_AIX
+// glibc on aix has a bug when using ceil, trunc or nearbyint:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=97086
+template <typename T>
+T FpOpWorkaround(T input, T value) {
+  // Return -0.0 when the input is negative but the rounded result is +0.0.
+  if (std::signbit(input) && value == 0.0 && !std::signbit(value)) {
+ return -0.0;
+ }
+ return value;
+}
+#endif
+
V8_EXPORT_PRIVATE bool PassesFilter(Vector<const char> name,
Vector<const char> filter);
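For reference, here is a standalone sketch (not part of the patch) that mirrors the new Saturate and SaturateRoundingQMul helpers added to utils.h above, with the multiply widened to int64_t first, so the Q15 rounding and saturation behavior can be checked in isolation:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>

template <typename T>
T Saturate(int64_t value) {
  static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
  int64_t min = static_cast<int64_t>(std::numeric_limits<T>::min());
  int64_t max = static_cast<int64_t>(std::numeric_limits<T>::max());
  return static_cast<T>(std::max(min, std::min(max, value)));
}

template <typename T>
T SaturateRoundingQMul(T a, T b) {
  // Widen, add the rounding constant, shift back into Q-format, then saturate.
  constexpr int size_in_bits = sizeof(T) * 8;
  int64_t product = static_cast<int64_t>(a) * b;
  product += int64_t{1} << (size_in_bits - 2);
  product >>= (size_in_bits - 1);
  return Saturate<T>(product);
}

int main() {
  // Q15: 0x4000 is 0.5, so 0.5 * 0.5 rounds to 0.25 (0x2000 == 8192).
  std::cout << SaturateRoundingQMul<int16_t>(0x4000, 0x4000) << "\n";
  // INT16_MIN * INT16_MIN would be +1.0 in Q15; it saturates to 0x7FFF (32767).
  std::cout << SaturateRoundingQMul<int16_t>(INT16_MIN, INT16_MIN) << "\n";
}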
diff --git a/deps/v8/src/wasm/DIR_METADATA b/deps/v8/src/wasm/DIR_METADATA
new file mode 100644
index 0000000000..3b428d9660
--- /dev/null
+++ b/deps/v8/src/wasm/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>WebAssembly"
+}
\ No newline at end of file
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index 801795058d..38224181e9 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -8,5 +8,3 @@ thibaudm@chromium.org
zhin@chromium.org
per-file wasm-js.*=adamk@chromium.org
-
-# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index b4966c012b..af969f387e 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -139,6 +139,8 @@ template <void (Assembler::*op)(Register, Register, const Operand&, SBit,
SBit, Condition)>
inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister lhs, int32_t imm) {
+ // The compiler allocated registers such that either {dst == lhs} or there is
+ // no overlap between the two.
DCHECK_NE(dst.low_gp(), lhs.high_gp());
(assm->*op)(dst.low_gp(), lhs.low_gp(), Operand(imm), SetCC, al);
// Top half of the immediate sign extended, either 0 or -1.
@@ -532,16 +534,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
+ DCHECK_LE(0, offset);
DCHECK_EQ(4, size);
ldr(dst, liftoff::GetInstanceOperand());
ldr(dst, MemOperand(dst, offset));
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
LoadFromInstance(dst, offset, kTaggedSize);
}
@@ -1005,11 +1005,13 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
if (cache_state()->is_used(LiftoffRegister(dst_high))) {
SpillRegister(LiftoffRegister(dst_high));
}
- UseScratchRegisterScope temps(this);
- Register actual_addr = liftoff::CalculateActualAddress(
- this, &temps, src_addr, offset_reg, offset_imm);
- ldrexd(dst_low, dst_high, actual_addr);
- dmb(ISH);
+ {
+ UseScratchRegisterScope temps(this);
+ Register actual_addr = liftoff::CalculateActualAddress(
+ this, &temps, src_addr, offset_reg, offset_imm);
+ ldrexd(dst_low, dst_high, actual_addr);
+ dmb(ISH);
+ }
ParallelRegisterMove(
{{dst, LiftoffRegister::ForPair(dst_low, dst_high), kWasmI64}});
@@ -1323,12 +1325,10 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
-#ifdef DEBUG
// The {str} instruction needs a temp register when the immediate in the
// provided MemOperand does not fit into 12 bits. This happens for large stack
// frames. This DCHECK checks that the temp register is available when needed.
DCHECK(UseScratchRegisterScope{this}.CanAcquire());
-#endif
DCHECK_LT(0, offset);
RecordUsedSpillOffset(offset);
MemOperand dst(fp, -offset);
@@ -2259,6 +2259,18 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
NeonMemOperand(actual_src_addr));
vmovl(NeonU32, liftoff::GetSimd128Register(dst), dst.low_fp());
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ Simd128Register dest = liftoff::GetSimd128Register(dst);
+ if (memtype == MachineType::Int32()) {
+ vmov(dest, 0);
+ vld1s(Neon32, NeonListOperand(dst.low_fp()), 0,
+ NeonMemOperand(actual_src_addr));
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ vmov(dest.high(), 0);
+ vld1(Neon64, NeonListOperand(dest.low()),
+ NeonMemOperand(actual_src_addr));
+ }
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
@@ -2921,6 +2933,23 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ QwNeonRegister dest = liftoff::GetSimd128Register(dst);
+ QwNeonRegister left = liftoff::GetSimd128Register(lhs);
+ QwNeonRegister right = liftoff::GetSimd128Register(rhs);
+
+ UseScratchRegisterScope temps(this);
+ Simd128Register scratch = temps.AcquireQ();
+
+ vmull(NeonS16, scratch, left.low(), right.low());
+ vpadd(Neon32, dest.low(), scratch.low(), scratch.high());
+
+ vmull(NeonS16, scratch, left.high(), right.high());
+ vpadd(Neon32, dest.high(), scratch.low(), scratch.high());
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
vdup(Neon16, liftoff::GetSimd128Register(dst), src.gp());
@@ -3015,9 +3044,9 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqadd(NeonS16, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3028,16 +3057,16 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqsub(NeonS16, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqsub(NeonU16, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3048,9 +3077,9 @@ void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqadd(NeonU16, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3133,7 +3162,6 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
}
int table_size = src1 == src2 ? 2 : 4;
- uint32_t mask = table_size == 2 ? 0x0F0F0F0F : 0x1F1F1F1F;
int scratch_s_base = scratch.code() * 4;
for (int j = 0; j < 4; j++) {
@@ -3141,11 +3169,9 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
for (int i = 3; i >= 0; i--) {
imm = (imm << 8) | shuffle[j * 4 + i];
}
- uint32_t four_lanes = imm;
+ DCHECK_EQ(0, imm & (table_size == 2 ? 0xF0F0F0F0 : 0xE0E0E0E0));
// Ensure indices are in [0,15] if table_size is 2, or [0,31] if 4.
- four_lanes &= mask;
- vmov(SwVfpRegister::from_code(scratch_s_base + j),
- Float32::FromBits(four_lanes));
+ vmov(SwVfpRegister::from_code(scratch_s_base + j), Float32::FromBits(imm));
}
DwVfpRegister table_base = src1.low();
@@ -3277,9 +3303,9 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqadd(NeonS8, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3290,16 +3316,16 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqsub(NeonS8, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqsub(NeonU8, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3310,9 +3336,9 @@ void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqadd(NeonU8, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
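The new emit_i32x4_dot_i16x8_s lowering above (vmull on the low and high halves followed by vpadd) implements the wasm i32x4.dot_i16x8_s operation. A scalar reference of the lane semantics, for illustration only:

#include <array>
#include <cstdint>

std::array<int32_t, 4> I32x4DotI16x8S(const std::array<int16_t, 8>& a,
                                      const std::array<int16_t, 8>& b) {
  std::array<int32_t, 4> result;
  for (int i = 0; i < 4; ++i) {
    // Each output lane is the sum of two adjacent widened 16x16->32 products.
    int32_t lo = int32_t{a[2 * i]} * b[2 * i];
    int32_t hi = int32_t{a[2 * i + 1]} * b[2 * i + 1];
    // The only overflowing case (both inputs INT16_MIN) wraps, so add in 64
    // bits and truncate back to 32.
    result[i] = static_cast<int32_t>(int64_t{lo} + int64_t{hi});
  }
  return result;
}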
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 4fe3abc544..402f0d2e84 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -186,25 +186,36 @@ int LiftoffAssembler::PrepareStackFrame() {
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
+ temps.Exclude(x16, x17);
+
+ // This is the previous stack pointer value (before we push the lr and the
+  // fp). We need to keep it to authenticate the lr and adjust the new stack
+ // pointer afterwards.
+ Add(x16, fp, 16);
+
+ // Load the fp and lr of the old frame, they will be pushed in the new frame
+ // during the actual call.
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ Ldp(fp, x17, MemOperand(fp));
+ Autib1716();
+ Mov(lr, x17);
+#else
+ Ldp(fp, lr, MemOperand(fp));
+#endif
- // Push the return address and frame pointer to complete the stack frame.
- sub(sp, sp, 16);
- ldr(scratch, MemOperand(fp, 8));
- Poke(scratch, 8);
- ldr(scratch, MemOperand(fp, 0));
- Poke(scratch, 0);
+ temps.Include(x17);
+
+ Register scratch = temps.AcquireX();
- // Shift the whole frame upwards.
- int slot_count = num_callee_stack_params + 2;
+ // Shift the whole frame upwards, except for fp and lr.
+ int slot_count = num_callee_stack_params;
for (int i = slot_count - 1; i >= 0; --i) {
ldr(scratch, MemOperand(sp, i * 8));
- str(scratch, MemOperand(fp, (i - stack_param_delta) * 8));
+ str(scratch, MemOperand(x16, (i - stack_param_delta) * 8));
}
- // Set the new stack and frame pointer.
- Sub(sp, fp, stack_param_delta * 8);
- Pop<kAuthLR>(fp, lr);
+ // Set the new stack pointer.
+ Sub(sp, x16, stack_param_delta * 8);
}
void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
@@ -302,9 +313,8 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
+ DCHECK_LE(0, offset);
Ldr(dst, liftoff::GetInstanceOperand());
DCHECK(size == 4 || size == 8);
if (size == 4) {
@@ -314,9 +324,8 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
+ DCHECK_LE(0, offset);
Ldr(dst, liftoff::GetInstanceOperand());
LoadTaggedPointerField(dst, MemOperand(dst, offset));
}
@@ -676,11 +685,12 @@ void LiftoffAssembler::AtomicCompareExchange(
}
UseScratchRegisterScope temps(this);
- Register store_result = temps.AcquireW();
Register actual_addr = liftoff::CalculateActualAddress(
this, dst_addr, offset_reg, offset_imm, temps.AcquireX());
+ Register store_result = temps.AcquireW();
+
Label retry;
Label done;
Bind(&retry);
@@ -1495,6 +1505,13 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
Ldr(dst.fp().D(), src_op);
Uxtl(dst.fp().V2D(), dst.fp().V2S());
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ if (memtype == MachineType::Int32()) {
+ Ldr(dst.fp().S(), src_op);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ Ldr(dst.fp().D(), src_op);
+ }
} else {
// ld1r only allows no offset or post-index, so emit an add.
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
@@ -2003,6 +2020,17 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
Umax(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ UseScratchRegisterScope scope(this);
+ VRegister tmp1 = scope.AcquireV(kFormat4S);
+ VRegister tmp2 = scope.AcquireV(kFormat4S);
+ Smull(tmp1, lhs.fp().V4H(), rhs.fp().V4H());
+ Smull2(tmp2, lhs.fp().V8H(), rhs.fp().V8H());
+ Addp(dst.fp().V4S(), tmp1, tmp2);
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
Dup(dst.fp().V8H(), src.gp().W());
@@ -2105,9 +2133,9 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
Add(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Sqadd(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
@@ -2116,15 +2144,15 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
Sub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Sqsub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Uqsub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
@@ -2133,9 +2161,9 @@ void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
Mul(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Uqadd(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
@@ -2187,12 +2215,13 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
Mov(src2.Q(), rhs.fp().Q());
}
- uint8_t mask = lhs == rhs ? 0x0F : 0x1F;
int64_t imms[2] = {0, 0};
for (int i = 7; i >= 0; i--) {
- imms[0] = (imms[0] << 8) | (shuffle[i] & mask);
- imms[1] = (imms[1] << 8) | (shuffle[i + 8] & mask);
+ imms[0] = (imms[0] << 8) | (shuffle[i]);
+ imms[1] = (imms[1] << 8) | (shuffle[i + 8]);
}
+ DCHECK_EQ(0, (imms[0] | imms[1]) &
+ (lhs == rhs ? 0xF0F0F0F0F0F0F0F0 : 0xE0E0E0E0E0E0E0E0));
Movi(temp.V16B(), imms[1], imms[0]);
@@ -2307,9 +2336,9 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
Add(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Sqadd(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
@@ -2318,15 +2347,15 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
Sub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Sqsub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Uqsub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
@@ -2335,9 +2364,9 @@ void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
Mul(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Uqadd(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
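The kZeroExtend branches added to LoadTransform in the arm, arm64, and ia32 backends implement the v128.load32_zero / v128.load64_zero instructions: a 4- or 8-byte load into the low lanes with the rest of the vector cleared. A byte-level sketch of those semantics (illustration only; the function name is ours):

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>

std::array<uint8_t, 16> LoadZeroExtend(const uint8_t* mem, size_t load_size) {
  // load_size is 4 for load32_zero and 8 for load64_zero.
  std::array<uint8_t, 16> v128{};            // all 16 bytes start out as zero
  std::memcpy(v128.data(), mem, load_size);  // only the low bytes are loaded
  return v128;
}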
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 3c2fccc997..5e640093c4 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -261,16 +261,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
+ DCHECK_LE(0, offset);
mov(dst, liftoff::GetInstanceOperand());
DCHECK_EQ(4, size);
mov(dst, Operand(dst, offset));
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
LoadFromInstance(dst, offset, kTaggedSize);
}
@@ -1456,31 +1454,19 @@ template <void (Assembler::*op)(Register, const Immediate&),
void (Assembler::*op_with_carry)(Register, int32_t)>
inline void OpWithCarryI(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister lhs, int32_t imm) {
- // First, compute the low half of the result, potentially into a temporary dst
- // register if {dst.low_gp()} equals any register we need to
- // keep alive for computing the upper half.
- LiftoffRegList keep_alive = LiftoffRegList::ForRegs(lhs.high_gp());
- Register dst_low = keep_alive.has(dst.low_gp())
- ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
- : dst.low_gp();
-
- if (dst_low != lhs.low_gp()) assm->mov(dst_low, lhs.low_gp());
- (assm->*op)(dst_low, Immediate(imm));
+ // The compiler allocated registers such that either {dst == lhs} or there is
+ // no overlap between the two.
+ DCHECK_NE(dst.low_gp(), lhs.high_gp());
- // Now compute the upper half, while keeping alive the previous result.
- keep_alive = LiftoffRegList::ForRegs(dst_low);
- Register dst_high = keep_alive.has(dst.high_gp())
- ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
- : dst.high_gp();
+ // First, compute the low half of the result.
+ if (dst.low_gp() != lhs.low_gp()) assm->mov(dst.low_gp(), lhs.low_gp());
+ (assm->*op)(dst.low_gp(), Immediate(imm));
- if (dst_high != lhs.high_gp()) assm->mov(dst_high, lhs.high_gp());
+ // Now compute the upper half.
+ if (dst.high_gp() != lhs.high_gp()) assm->mov(dst.high_gp(), lhs.high_gp());
// Top half of the immediate sign extended, either 0 or -1.
int32_t sign_extend = imm < 0 ? -1 : 0;
- (assm->*op_with_carry)(dst_high, sign_extend);
-
- // If necessary, move result into the right registers.
- LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
- if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
+ (assm->*op_with_carry)(dst.high_gp(), sign_extend);
}
} // namespace liftoff
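The simplified OpWithCarryI above relies on the register allocator guaranteeing that {dst == lhs} or that the two do not overlap, so the low half is computed in place and the high half is finished with an add-with-carry against the sign-extended immediate. A scalar reference of that two-step addition (illustration only):

#include <cstdint>

uint64_t AddImm64ViaPair(uint32_t lo, uint32_t hi, int32_t imm) {
  uint32_t imm_lo = static_cast<uint32_t>(imm);
  uint32_t imm_hi = imm < 0 ? 0xFFFFFFFFu : 0u;  // top half, sign extended
  uint32_t new_lo = lo + imm_lo;
  uint32_t carry = new_lo < lo ? 1u : 0u;        // unsigned wrap detects carry
  uint32_t new_hi = hi + imm_hi + carry;
  return (static_cast<uint64_t>(new_hi) << 32) | new_lo;
}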
@@ -2665,6 +2651,13 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
} else if (memtype == MachineType::Uint32()) {
Pmovzxdq(dst.fp(), src_op);
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ if (memtype == MachineType::Int32()) {
+ movss(dst.fp(), src_op);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ movsd(dst.fp(), src_op);
+ }
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
@@ -2700,15 +2693,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
for (int i = 3; i >= 0; i--) {
push_imm32(imms[i]);
}
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpshufb(dst.fp(), lhs.fp(), Operand(esp, 0));
- } else {
- if (dst != lhs) {
- movups(dst.fp(), lhs.fp());
- }
- pshufb(dst.fp(), Operand(esp, 0));
- }
+ Pshufb(dst.fp(), lhs.fp(), Operand(esp, 0));
mov(esp, tmp.gp());
return;
}
@@ -2723,7 +2708,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
}
push(Immediate(mask));
}
- Pshufb(liftoff::kScratchDoubleReg, Operand(esp, 0));
+ Pshufb(liftoff::kScratchDoubleReg, lhs.fp(), Operand(esp, 0));
for (int i = 3; i >= 0; i--) {
uint32_t mask = 0;
@@ -2734,10 +2719,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
}
push(Immediate(mask));
}
- if (dst.fp() != rhs.fp()) {
- movups(dst.fp(), rhs.fp());
- }
- Pshufb(dst.fp(), Operand(esp, 0));
+ Pshufb(dst.fp(), rhs.fp(), Operand(esp, 0));
Por(dst.fp(), liftoff::kScratchDoubleReg);
mov(esp, tmp.gp());
}
@@ -2751,10 +2733,7 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
TurboAssembler::Move(mask, uint32_t{0x70707070});
Pshufd(mask, mask, uint8_t{0x0});
Paddusb(mask, rhs.fp());
- if (lhs != dst) {
- Movaps(dst.fp(), lhs.fp());
- }
- Pshufb(dst.fp(), mask);
+ Pshufb(dst.fp(), lhs.fp(), mask);
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
@@ -3211,16 +3190,16 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsb, &Assembler::paddsb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusb, &Assembler::paddusb>(
this, dst, lhs, rhs);
}
@@ -3231,16 +3210,16 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsb, &Assembler::psubsb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusb,
&Assembler::psubusb>(this, dst, lhs,
rhs);
@@ -3409,16 +3388,16 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsw, &Assembler::paddsw>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusw, &Assembler::paddusw>(
this, dst, lhs, rhs);
}
@@ -3429,16 +3408,16 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsw, &Assembler::psubsw>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusw,
&Assembler::psubusw>(this, dst, lhs,
rhs);
@@ -3588,6 +3567,13 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaddwd, &Assembler::pmaddwd>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
DoubleRegister reg =
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index e219025e53..dea5221ac6 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -37,6 +37,7 @@ class StackTransferRecipe {
struct RegisterLoad {
enum LoadKind : uint8_t {
+ kNop, // no-op, used for high fp of a fp pair.
kConstant, // load a constant value into a register.
kStack, // fill a register from a stack slot.
kLowHalfStack, // fill a register from the low half of a stack slot.
@@ -63,6 +64,10 @@ class StackTransferRecipe {
return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kWasmI32,
offset};
}
+ static RegisterLoad Nop() {
+ // ValueType does not matter.
+ return {kNop, kWasmI32, 0};
+ }
private:
RegisterLoad(LoadKind kind, ValueType type, int32_t value)
@@ -71,6 +76,8 @@ class StackTransferRecipe {
public:
explicit StackTransferRecipe(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
+ StackTransferRecipe(const StackTransferRecipe&) = delete;
+ StackTransferRecipe& operator=(const StackTransferRecipe&) = delete;
~StackTransferRecipe() { Execute(); }
void Execute() {
@@ -217,11 +224,11 @@ class StackTransferRecipe {
RegisterLoad::HalfStack(stack_offset, kHighWord);
} else if (dst.is_fp_pair()) {
DCHECK_EQ(kWasmS128, type);
- // load_dst_regs_.set above will set both low and high fp regs.
- // But unlike gp_pair, we load a kWasm128 in one go in ExecuteLoads.
- // So unset the top fp register to skip loading it.
- load_dst_regs_.clear(dst.high());
+      // Only the low register needs a real load, since all 128 bits are
+      // loaded in one go. Both low and high must be set in load_dst_regs_,
+      // but both are cleared while iterating over it, so we won't load twice.
*register_load(dst.low()) = RegisterLoad::Stack(stack_offset, type);
+ *register_load(dst.high()) = RegisterLoad::Nop();
} else {
*register_load(dst) = RegisterLoad::Stack(stack_offset, type);
}
@@ -318,6 +325,8 @@ class StackTransferRecipe {
for (LiftoffRegister dst : load_dst_regs_) {
RegisterLoad* load = register_load(dst);
switch (load->kind) {
+ case RegisterLoad::kNop:
+ break;
case RegisterLoad::kConstant:
asm_->LoadConstant(dst, load->type == kWasmI64
? WasmValue(int64_t{load->value})
@@ -343,8 +352,6 @@ class StackTransferRecipe {
}
load_dst_regs_ = {};
}
-
- DISALLOW_COPY_AND_ASSIGN(StackTransferRecipe);
};
class RegisterReuseMap {
@@ -519,9 +526,7 @@ int LiftoffAssembler::GetTotalFrameSlotCountForGC() const {
namespace {
-constexpr AssemblerOptions DefaultLiftoffOptions() {
- return AssemblerOptions{};
-}
+AssemblerOptions DefaultLiftoffOptions() { return AssemblerOptions{}; }
} // namespace
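Both StackTransferRecipe above and LiftoffStackSlots below replace the DISALLOW_COPY_AND_ASSIGN macro with explicitly deleted copy operations. A minimal sketch of that pattern (the class name is hypothetical):

class ScopedRecipe {
 public:
  ScopedRecipe() = default;
  // Deleting the copy operations in the public section is the drop-in
  // replacement for DISALLOW_COPY_AND_ASSIGN(ScopedRecipe) in a private one.
  ScopedRecipe(const ScopedRecipe&) = delete;
  ScopedRecipe& operator=(const ScopedRecipe&) = delete;
};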
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index e2bd99841f..895abbbbb4 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -478,8 +478,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadConstant(LiftoffRegister, WasmValue,
RelocInfo::Mode rmode = RelocInfo::NONE);
- inline void LoadFromInstance(Register dst, uint32_t offset, int size);
- inline void LoadTaggedPointerFromInstance(Register dst, uint32_t offset);
+ inline void LoadFromInstance(Register dst, int offset, int size);
+ inline void LoadTaggedPointerFromInstance(Register dst, int offset);
inline void SpillInstance(Register instance);
inline void FillInstanceInto(Register dst);
inline void LoadTaggedPointer(Register dst, Register src_addr,
@@ -675,6 +675,15 @@ class LiftoffAssembler : public TurboAssembler {
}
}
+ inline void emit_ptrsize_zeroextend_i32(Register dst, Register src) {
+ if (kSystemPointerSize == 8) {
+ emit_type_conversion(kExprI64UConvertI32, LiftoffRegister(dst),
+ LiftoffRegister(src));
+ } else if (dst != src) {
+ Move(dst, src, kWasmI32);
+ }
+ }
+
// f32 binops.
inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
@@ -852,20 +861,16 @@ class LiftoffAssembler : public TurboAssembler {
int32_t rhs);
inline void emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
- inline void emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
+ inline void emit_i8x16_add_sat_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_add_sat_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
- inline void emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
+ inline void emit_i8x16_sub_sat_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_sub_sat_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i8x16_min_s(LiftoffRegister dst, LiftoffRegister lhs,
@@ -894,20 +899,16 @@ class LiftoffAssembler : public TurboAssembler {
int32_t rhs);
inline void emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
- inline void emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
+ inline void emit_i16x8_add_sat_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_add_sat_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
- inline void emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
+ inline void emit_i16x8_sub_sat_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_sub_sat_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i16x8_min_s(LiftoffRegister dst, LiftoffRegister lhs,
@@ -948,6 +949,8 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i32x4_max_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i32x4_dot_i16x8_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i64x2_neg(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
@@ -1302,6 +1305,8 @@ void LiftoffAssembler::emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs,
class LiftoffStackSlots {
public:
explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
+ LiftoffStackSlots(const LiftoffStackSlots&) = delete;
+ LiftoffStackSlots& operator=(const LiftoffStackSlots&) = delete;
void Add(const LiftoffAssembler::VarState& src, uint32_t src_offset,
RegPairHalf half) {
@@ -1328,8 +1333,6 @@ class LiftoffStackSlots {
base::SmallVector<Slot, 8> slots_;
LiftoffAssembler* const asm_;
-
- DISALLOW_COPY_AND_ASSIGN(LiftoffStackSlots);
};
} // namespace wasm
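The new emit_ptrsize_zeroextend_i32 helper above widens a 32-bit index to pointer width on 64-bit targets and degrades to a plain register move on 32-bit ones. At the value level it is an unsigned extension (sketch, illustration only):

#include <cstdint>

uintptr_t ZeroExtendToPointerSize(uint32_t index) {
  // Zero- (not sign-) extension: a large u32 index must not become negative.
  return static_cast<uintptr_t>(index);
}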
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 447be8cdae..1ead202ea0 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -258,9 +258,9 @@ class DebugSideTableBuilder {
class LiftoffCompiler {
public:
// TODO(clemensb): Make this a template parameter.
- static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
+ static constexpr Decoder::ValidateFlag validate = Decoder::kBooleanValidation;
- using Value = ValueBase;
+ using Value = ValueBase<validate>;
static constexpr auto kI32 = ValueType::kI32;
static constexpr auto kI64 = ValueType::kI64;
@@ -273,7 +273,7 @@ class LiftoffCompiler {
LiftoffAssembler::CacheState state;
};
- struct Control : public ControlBase<Value> {
+ struct Control : public ControlBase<Value, validate> {
std::unique_ptr<ElseState> else_state;
LiftoffAssembler::CacheState label_state;
MovableLabel label;
@@ -557,7 +557,7 @@ class LiftoffCompiler {
void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
__ local_type(i), "param"))
@@ -621,7 +621,7 @@ class LiftoffCompiler {
}
}
- if (FLAG_liftoff_extern_ref) {
+ if (FLAG_experimental_liftoff_extern_ref) {
// Initialize all reference type locals with ref.null.
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
@@ -690,30 +690,6 @@ class LiftoffCompiler {
}
if (FLAG_trace_wasm) TraceFunctionEntry(decoder);
-
- // If we are generating debug code, do check the "hook on function call"
- // flag. If set, trigger a break.
- if (V8_UNLIKELY(for_debugging_)) {
- // If there is a breakpoint set on the first instruction (== start of the
- // function), then skip the check for "hook on function call", since we
- // will unconditionally break there anyway.
- bool has_breakpoint = next_breakpoint_ptr_ != nullptr &&
- (*next_breakpoint_ptr_ == 0 ||
- *next_breakpoint_ptr_ == decoder->position());
- if (!has_breakpoint) {
- DEBUG_CODE_COMMENT("check hook on function call");
- Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
- LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress,
- kSystemPointerSize);
- Label no_break;
- __ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U,
- {});
- // Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, &no_break, kWasmI32, flag);
- EmitBreakpoint(decoder);
- __ bind(&no_break);
- }
- }
}
void GenerateOutOfLineCode(OutOfLineCode* ool) {
@@ -799,14 +775,14 @@ class LiftoffCompiler {
}
V8_NOINLINE void EmitDebuggingInfo(FullDecoder* decoder, WasmOpcode opcode) {
- DCHECK(V8_UNLIKELY(for_debugging_));
+ DCHECK(for_debugging_);
+ if (!WasmOpcodes::IsBreakable(opcode)) return;
+ bool has_breakpoint = false;
if (next_breakpoint_ptr_) {
if (*next_breakpoint_ptr_ == 0) {
// A single breakpoint at offset 0 indicates stepping.
DCHECK_EQ(next_breakpoint_ptr_ + 1, next_breakpoint_end_);
- if (WasmOpcodes::IsBreakable(opcode)) {
- EmitBreakpoint(decoder);
- }
+ has_breakpoint = true;
} else {
while (next_breakpoint_ptr_ != next_breakpoint_end_ &&
*next_breakpoint_ptr_ < decoder->position()) {
@@ -816,18 +792,34 @@ class LiftoffCompiler {
if (next_breakpoint_ptr_ == next_breakpoint_end_) {
next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr;
} else if (*next_breakpoint_ptr_ == decoder->position()) {
- DCHECK(WasmOpcodes::IsBreakable(opcode));
- EmitBreakpoint(decoder);
+ has_breakpoint = true;
}
}
}
- if (dead_breakpoint_ == decoder->position()) {
+ if (has_breakpoint) {
+ EmitBreakpoint(decoder);
+      // Once we have emitted a breakpoint, the "hook on function call" flag
+      // no longer needs to be checked.
+ checked_hook_on_function_call_ = true;
+ } else if (!checked_hook_on_function_call_) {
+ checked_hook_on_function_call_ = true;
+ // Check the "hook on function call" flag. If set, trigger a break.
+ DEBUG_CODE_COMMENT("check hook on function call");
+ Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
+ LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize);
+ Label no_break;
+ __ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {});
+ // Unary "equal" means "equals zero".
+ __ emit_cond_jump(kEqual, &no_break, kWasmI32, flag);
+ EmitBreakpoint(decoder);
+ __ bind(&no_break);
+ } else if (dead_breakpoint_ == decoder->position()) {
DCHECK(!next_breakpoint_ptr_ ||
*next_breakpoint_ptr_ != dead_breakpoint_);
// The top frame is paused at this position, but the breakpoint was
- // removed. Adding a dead breakpoint here ensures that the source position
- // exists, and that the offset to the return address is the same as in the
- // old code.
+ // removed. Adding a dead breakpoint here ensures that the source
+ // position exists, and that the offset to the return address is the
+ // same as in the old code.
Label cont;
__ emit_jump(&cont);
EmitBreakpoint(decoder);
@@ -843,7 +835,8 @@ class LiftoffCompiler {
#ifdef DEBUG
SLOW_DCHECK(__ ValidateCacheState());
if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- opcode = decoder->read_prefixed_opcode<Decoder::kValidate>(decoder->pc());
+ opcode = decoder->read_prefixed_opcode<Decoder::kFullValidation>(
+ decoder->pc());
}
DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
#endif
@@ -1251,9 +1244,12 @@ class LiftoffCompiler {
int32_t imm = rhs_slot.i32_const();
LiftoffRegister lhs = __ PopToRegister();
+ // Either reuse {lhs} for {dst}, or choose a register (pair) which does
+ // not overlap, for easier code generation.
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(lhs);
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs}, {})
- : __ GetUnusedRegister(result_rc, {});
+ ? __ GetUnusedRegister(result_rc, {lhs}, pinned)
+ : __ GetUnusedRegister(result_rc, pinned);
CallEmitFn(fnImm, dst, lhs, imm);
__ PushRegister(ValueType::Primitive(result_type), dst);
@@ -1632,7 +1628,7 @@ class LiftoffCompiler {
}
void RefNull(FullDecoder* decoder, ValueType type, Value*) {
- if (!FLAG_liftoff_extern_ref) {
+ if (!FLAG_experimental_liftoff_extern_ref) {
unsupported(decoder, kRefTypes, "ref_null");
return;
}
@@ -1815,7 +1811,7 @@ class LiftoffCompiler {
const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
global->type, "global")) {
@@ -1854,7 +1850,7 @@ class LiftoffCompiler {
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
global->type, "global")) {
@@ -2184,25 +2180,36 @@ class LiftoffCompiler {
__ SpillAllRegisters();
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
- // Get one register for computing the address (offset + index).
- LiftoffRegister address = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- // Compute offset+index in address.
- __ LoadConstant(address, WasmValue(offset));
- __ emit_i32_add(address.gp(), address.gp(), index);
+ // Get one register for computing the effective offset (offset + index).
+ LiftoffRegister effective_offset =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(effective_offset, WasmValue(offset));
+ __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
// Get a register to hold the stack slot for MemoryTracingInfo.
LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
// Allocate stack slot for MemoryTracingInfo.
__ AllocateStackSlot(info.gp(), sizeof(MemoryTracingInfo));
+ // Reuse the {effective_offset} register for all information to be stored in
+ // the MemoryTracingInfo struct.
+ LiftoffRegister data = effective_offset;
+
// Now store all information into the MemoryTracingInfo struct.
- __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, address), address,
- StoreType::kI32Store, pinned);
- __ LoadConstant(address, WasmValue(is_store ? 1 : 0));
- __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), address,
+ if (kSystemPointerSize == 8) {
+ // Zero-extend the effective offset to u64.
+ CHECK(__ emit_type_conversion(kExprI64UConvertI32, data, effective_offset,
+ nullptr));
+ }
+ __ Store(
+ info.gp(), no_reg, offsetof(MemoryTracingInfo, offset), data,
+ kSystemPointerSize == 8 ? StoreType::kI64Store : StoreType::kI32Store,
+ pinned);
+ __ LoadConstant(data, WasmValue(is_store ? 1 : 0));
+ __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), data,
StoreType::kI32Store8, pinned);
- __ LoadConstant(address, WasmValue(static_cast<int>(rep)));
- __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), address,
+ __ LoadConstant(data, WasmValue(static_cast<int>(rep)));
+ __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), data,
StoreType::kI32Store8, pinned);
WasmTraceMemoryDescriptor descriptor;
@@ -2287,15 +2294,11 @@ class LiftoffCompiler {
return;
}
- if (transform == LoadTransformationKind::kZeroExtend) {
- unsupported(decoder, kSimd, "prototyping s128 load zero extend");
- return;
- }
-
LiftoffRegList pinned;
Register index = pinned.set(__ PopToRegister()).gp();
- // For load splats, LoadType is the size of the load, and for load
- // extends, LoadType is the size of the lane, and it always loads 8 bytes.
+    // For load splats and load zero, LoadType is the size of the load; for
+    // load extends, LoadType is the size of the lane, and the access always
+    // loads 8 bytes.
uint32_t access_size =
transform == LoadTransformationKind::kExtend ? 8 : type.size();
if (BoundsCheckMem(decoder, access_size, imm.offset, index, pinned,
@@ -2330,6 +2333,12 @@ class LiftoffCompiler {
}
}
+ void LoadLane(FullDecoder* decoder, LoadType type, const Value& value,
+ const Value& index, const MemoryAccessImmediate<validate>& imm,
+ const uint8_t laneidx, Value* result) {
+ unsupported(decoder, kSimd, "simd load lane");
+ }
+
void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, const Value& value_val) {
@@ -2364,6 +2373,12 @@ class LiftoffCompiler {
}
}
+ void StoreLane(FullDecoder* decoder, StoreType type,
+ const MemoryAccessImmediate<validate>& imm, const Value& index,
+ const Value& value, const uint8_t laneidx) {
+    unsupported(decoder, kSimd, "simd store lane");
+ }
+
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
Register mem_size = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
@@ -2658,20 +2673,16 @@ class LiftoffCompiler {
&LiftoffAssembler::emit_i8x16_shri_u);
case wasm::kExprI8x16Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add);
- case wasm::kExprI8x16AddSaturateS:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i8x16_add_saturate_s);
- case wasm::kExprI8x16AddSaturateU:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i8x16_add_saturate_u);
+ case wasm::kExprI8x16AddSatS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add_sat_s);
+ case wasm::kExprI8x16AddSatU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add_sat_u);
case wasm::kExprI8x16Sub:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub);
- case wasm::kExprI8x16SubSaturateS:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i8x16_sub_saturate_s);
- case wasm::kExprI8x16SubSaturateU:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i8x16_sub_saturate_u);
+ case wasm::kExprI8x16SubSatS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub_sat_s);
+ case wasm::kExprI8x16SubSatU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub_sat_u);
case wasm::kExprI8x16Mul:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_mul);
case wasm::kExprI8x16MinS:
@@ -2701,20 +2712,16 @@ class LiftoffCompiler {
&LiftoffAssembler::emit_i16x8_shri_u);
case wasm::kExprI16x8Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add);
- case wasm::kExprI16x8AddSaturateS:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i16x8_add_saturate_s);
- case wasm::kExprI16x8AddSaturateU:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i16x8_add_saturate_u);
+ case wasm::kExprI16x8AddSatS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add_sat_s);
+ case wasm::kExprI16x8AddSatU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add_sat_u);
case wasm::kExprI16x8Sub:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub);
- case wasm::kExprI16x8SubSaturateS:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i16x8_sub_saturate_s);
- case wasm::kExprI16x8SubSaturateU:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i16x8_sub_saturate_u);
+ case wasm::kExprI16x8SubSatS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub_sat_s);
+ case wasm::kExprI16x8SubSatU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub_sat_u);
case wasm::kExprI16x8Mul:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_mul);
case wasm::kExprI16x8MinS:
@@ -2756,6 +2763,9 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_max_s);
case wasm::kExprI32x4MaxU:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_max_u);
+ case wasm::kExprI32x4DotI16x8S:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_dot_i16x8_s);
case wasm::kExprI64x2Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_neg);
case wasm::kExprI64x2Shl:
@@ -3238,13 +3248,15 @@ class LiftoffCompiler {
uint32_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
- Register index_plus_offset = index_reg;
+ Register index_plus_offset =
+ __ cache_state()->is_used(LiftoffRegister(index_reg))
+ ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
+ : index_reg;
if (offset) {
- if (__ cache_state()->is_used(LiftoffRegister(index_reg))) {
- index_plus_offset =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- }
__ emit_i32_addi(index_plus_offset, index_reg, offset);
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_plus_offset);
+ } else {
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
}
LiftoffAssembler::VarState timeout =
@@ -3285,7 +3297,7 @@ class LiftoffCompiler {
}
}
- ValueType sig_reps[] = {kWasmI32, type, kWasmI64};
+ ValueType sig_reps[] = {kPointerValueType, type, kWasmI64};
FunctionSig sig(0, 3, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor,
@@ -3313,16 +3325,18 @@ class LiftoffCompiler {
uint32_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
- Register index_plus_offset = index_reg;
+ Register index_plus_offset =
+ __ cache_state()->is_used(LiftoffRegister(index_reg))
+ ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
+ : index_reg;
if (offset) {
- if (__ cache_state()->is_used(LiftoffRegister(index_reg))) {
- index_plus_offset =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- }
__ emit_i32_addi(index_plus_offset, index_reg, offset);
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_plus_offset);
+ } else {
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
}
- ValueType sig_reps[] = {kWasmI32, kWasmI32, kWasmI32};
+ ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32};
FunctionSig sig(1, 2, sig_reps);
auto call_descriptor =
GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(compilation_zone_);
@@ -3806,7 +3820,7 @@ class LiftoffCompiler {
const Value args[], Value returns[], CallKind call_kind) {
for (ValueType ret : imm.sig->returns()) {
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
ret, "return")) {
@@ -3888,7 +3902,7 @@ class LiftoffCompiler {
}
for (ValueType ret : imm.sig->returns()) {
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
ret, "return")) {
@@ -3915,9 +3929,10 @@ class LiftoffCompiler {
// Bounds check against the table size.
Label* invalid_func_label = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapFuncInvalid);
+ decoder->position(), WasmCode::kThrowWasmTrapTableOutOfBounds);
- uint32_t canonical_sig_num = env_->module->signature_ids[imm.sig_index];
+ uint32_t canonical_sig_num =
+ env_->module->canonicalized_type_ids[imm.sig_index];
DCHECK_GE(canonical_sig_num, 0);
DCHECK_GE(kMaxInt, canonical_sig_num);
@@ -4057,6 +4072,11 @@ class LiftoffCompiler {
// address in OSR is correct.
int dead_breakpoint_ = 0;
+ // Remember whether the "hook on function call" has already been checked.
+ // This happens at the first breakable opcode in the function (if compiling
+ // for debugging).
+ bool checked_hook_on_function_call_ = false;
+
bool has_outstanding_op() const {
return outstanding_op_ != kNoOutstandingOp;
}
@@ -4094,15 +4114,11 @@ WasmCompilationResult ExecuteLiftoffCompilation(
std::unique_ptr<DebugSideTable>* debug_sidetable, int dead_breakpoint) {
int func_body_size = static_cast<int>(func_body.end - func_body.start);
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
- "wasm.CompileBaseline", "func_index", func_index, "body_size",
+ "wasm.CompileBaseline", "funcIndex", func_index, "bodySize",
func_body_size);
Zone zone(allocator, "LiftoffCompilationZone");
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
- base::Optional<TimedHistogramScope> liftoff_compile_time_scope;
- if (counters) {
- liftoff_compile_time_scope.emplace(counters->liftoff_compile_time());
- }
size_t code_size_estimate =
WasmCodeManager::EstimateLiftoffCodeSize(func_body_size);
// Allocate the initial buffer a bit bigger to avoid reallocation during code
@@ -4115,18 +4131,14 @@ WasmCompilationResult ExecuteLiftoffCompilation(
if (debug_sidetable) {
debug_sidetable_builder = std::make_unique<DebugSideTableBuilder>();
}
- WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
+ WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
&zone, env->module, env->enabled_features, detected, func_body,
call_descriptor, env, &zone, instruction_buffer->CreateView(),
debug_sidetable_builder.get(), for_debugging, func_index, breakpoints,
dead_breakpoint);
decoder.Decode();
- liftoff_compile_time_scope.reset();
LiftoffCompiler* compiler = &decoder.interface();
- if (decoder.failed()) {
- compiler->OnFirstError(&decoder);
- return WasmCompilationResult{};
- }
+ if (decoder.failed()) compiler->OnFirstError(&decoder);
if (counters) {
// Check that the histogram for the bailout reasons has the correct size.
@@ -4172,7 +4184,7 @@ std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
DebugSideTableBuilder debug_sidetable_builder;
WasmFeatures detected;
- WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
+ WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
&zone, env->module, env->enabled_features, &detected, func_body,
call_descriptor, env, &zone,
NewAssemblerBuffer(AssemblerBase::kDefaultBufferSize),
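The debugging changes above move the "hook on function call" check out of the function prologue and into EmitDebuggingInfo, guarded by the new checked_hook_on_function_call_ flag. A condensed sketch of the resulting control flow (illustration only, not the actual compiler code):

void EmitDebuggingInfoSketch(bool breakpoint_here, bool dead_breakpoint_here,
                             bool& checked_hook_on_function_call) {
  if (breakpoint_here) {
    // EmitBreakpoint(decoder); an explicit breakpoint subsumes the hook check.
    checked_hook_on_function_call = true;
  } else if (!checked_hook_on_function_call) {
    checked_hook_on_function_call = true;
    // Load the "hook on function call" flag from the instance, break if set.
  } else if (dead_breakpoint_here) {
    // Emit a dead breakpoint so source positions and return-address offsets
    // match the old code the paused frame was compiled from.
  }
}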
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index 49aac008f0..285af7dac0 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -137,8 +137,8 @@ static_assert(2 * kBitsPerGpRegCode >= kBitsPerFpRegCode,
class LiftoffRegister {
static constexpr int needed_bits =
- Max(kNeedI64RegPair || kNeedS128RegPair ? kBitsPerRegPair : 0,
- kBitsPerLiftoffRegCode);
+ std::max(kNeedI64RegPair || kNeedS128RegPair ? kBitsPerRegPair : 0,
+ kBitsPerLiftoffRegCode);
using storage_t = std::conditional<
needed_bits <= 8, uint8_t,
std::conditional<needed_bits <= 16, uint16_t, uint32_t>::type>::type;
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 97b8487848..5c78eca319 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -360,16 +360,16 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
int size) {
- DCHECK_LE(offset, kMaxInt);
+ DCHECK_LE(0, offset);
lw(dst, liftoff::GetInstanceOperand());
DCHECK_EQ(4, size);
lw(dst, MemOperand(dst, offset));
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+ int32_t offset) {
LoadFromInstance(dst, offset, kTaggedSize);
}
@@ -1883,16 +1883,16 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i8x16_add");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_saturate_s");
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_s");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_saturate_u");
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_u");
}
void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1900,16 +1900,16 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i8x16_sub");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_saturate_s");
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_s");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_saturate_u");
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_u");
}
void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1998,16 +1998,16 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i16x8_add");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_add_saturate_s");
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_s");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_add_saturate_u");
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_u");
}
void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2015,16 +2015,16 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i16x8_sub");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_sub_saturate_s");
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_s");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_sub_saturate_u");
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_u");
}
void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2147,6 +2147,12 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
bailout(kSimd, "emit_i32x4_max_u");
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_dot_i16x8_s");
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i64x2_neg");
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 4c6c1fe1ce..b97c49437f 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
#define V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
+#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
namespace v8 {
@@ -339,9 +340,9 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
int size) {
- DCHECK_LE(offset, kMaxInt);
+ DCHECK_LE(0, offset);
Ld(dst, liftoff::GetInstanceOperand());
DCHECK(size == 4 || size == 8);
if (size == 4) {
@@ -352,7 +353,7 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+ int32_t offset) {
LoadFromInstance(dst, offset, kTaggedSize);
}
@@ -378,7 +379,27 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
int32_t offset_imm,
LiftoffRegister src,
LiftoffRegList pinned) {
- bailout(kRefTypes, "GlobalSet");
+ DCHECK_GE(offset_imm, 0);
+ DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Sd(src.gp(), MemOperand(dst_addr, offset_imm));
+
+ Label write_barrier;
+ Label exit;
+ CheckPageFlag(dst_addr, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ &write_barrier);
+ b(&exit);
+ bind(&write_barrier);
+ JumpIfSmi(src.gp(), &exit);
+ CheckPageFlag(src.gp(), scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &exit);
+ Daddu(scratch, dst_addr, offset_imm);
+ CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
+ wasm::WasmCode::kRecordWrite);
+ bind(&exit);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -1487,6 +1508,16 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
fill_d(dst_msa, scratch);
ilvr_w(dst_msa, kSimd128RegZero, dst_msa);
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ xor_v(dst_msa, dst_msa, dst_msa);
+ if (memtype == MachineType::Int32()) {
+ Lwu(scratch, src_op);
+ insert_w(dst_msa, 0, scratch);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ Ld(scratch, src_op);
+ insert_d(dst_msa, 0, scratch);
+ }
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
@@ -1841,15 +1872,15 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
addv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
adds_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
adds_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
@@ -1858,15 +1889,15 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
subv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
subs_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
subs_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
@@ -1970,15 +2001,15 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
addv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
adds_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
adds_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
@@ -1987,15 +2018,15 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
subv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
subs_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
subs_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
@@ -2131,6 +2162,12 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
max_u_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ dotp_s_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
@@ -2264,6 +2301,8 @@ void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
// dst = (scratch1 <= scratch0) ? scratch1 : scratch0.
fsle_w(dst_msa, scratch1, scratch0);
bsel_v(dst_msa, scratch0, scratch1);
+ // Canonicalize the result.
+ fmin_w(dst_msa, dst_msa, dst_msa);
}
void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2284,6 +2323,8 @@ void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
// dst = (scratch0 <= scratch1) ? scratch1 : scratch0.
fsle_w(dst_msa, scratch0, scratch1);
bsel_v(dst_msa, scratch0, scratch1);
+ // Canonicalize the result.
+ fmax_w(dst_msa, dst_msa, dst_msa);
}
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2383,6 +2424,8 @@ void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
// dst = (scratch1 <= scratch0) ? scratch1 : scratch0.
fsle_d(dst_msa, scratch1, scratch0);
bsel_v(dst_msa, scratch0, scratch1);
+ // Canonicalize the result.
+ fmin_d(dst_msa, dst_msa, dst_msa);
}
void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2403,6 +2446,8 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
// dst = (scratch0 <= scratch1) ? scratch1 : scratch0.
fsle_d(dst_msa, scratch0, scratch1);
bsel_v(dst_msa, scratch0, scratch1);
+ // Canonicalize the result.
+ fmax_d(dst_msa, dst_msa, dst_msa);
}
void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
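The mips64 StoreTaggedPointer change above replaces the kRefTypes bailout with an inline generational write barrier: the tagged value is stored unconditionally, and the out-of-line record-write stub is reached only when the destination page tracks outgoing pointers, the stored value is not a Smi, and the value's page tracks incoming pointers. A minimal sketch of that filtering logic in plain C++; PageFlagsOf, IsSmi and RecordWrite are hypothetical stand-ins for the emitted CheckPageFlag / JumpIfSmi / CallRecordWriteStub sequence, not real V8 APIs:

#include <cstdint>

// Hypothetical page-flag bits, standing in for MemoryChunk's masks.
constexpr uint32_t kPointersFromHereAreInteresting = 1u << 0;
constexpr uint32_t kPointersToHereAreInteresting = 1u << 1;

uint32_t PageFlagsOf(uintptr_t address);   // assumed: reads the page header
bool IsSmi(uintptr_t tagged_value);        // assumed: checks the Smi tag bit
void RecordWrite(uintptr_t slot_address);  // assumed: the slow-path stub

void StoreTaggedPointerSketch(uintptr_t dst_addr, int32_t offset,
                              uintptr_t value) {
  uintptr_t slot = dst_addr + offset;
  *reinterpret_cast<uintptr_t*>(slot) = value;  // the unconditional Sd(...)
  // Skip the barrier unless both pages care and the value is a heap object.
  if ((PageFlagsOf(dst_addr) & kPointersFromHereAreInteresting) == 0) return;
  if (IsSmi(value)) return;
  if ((PageFlagsOf(value) & kPointersToHereAreInteresting) == 0) return;
  RecordWrite(slot);  // corresponds to CallRecordWriteStub(..., kRecordWrite)
}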
diff --git a/deps/v8/src/wasm/baseline/ppc/OWNERS b/deps/v8/src/wasm/baseline/ppc/OWNERS
index 6edd45a6ef..02c2cd757c 100644
--- a/deps/v8/src/wasm/baseline/ppc/OWNERS
+++ b/deps/v8/src/wasm/baseline/ppc/OWNERS
@@ -2,3 +2,4 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
+vasili.skurydzin@ibm.com
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index ef7b720ea9..f75e9db459 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -88,13 +88,11 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
bailout(kUnsupportedArchitecture, "LoadConstant");
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
bailout(kUnsupportedArchitecture, "LoadFromInstance");
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
}
@@ -944,6 +942,12 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4_max_u");
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_dot_i16x8_s");
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i16x8splat");
@@ -1006,9 +1010,9 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8add");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_s");
}
@@ -1017,15 +1021,15 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8sub");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_s");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_u");
}
@@ -1034,9 +1038,9 @@ void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8mul");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_u");
}
@@ -1172,9 +1176,9 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16add");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_s");
}
@@ -1485,15 +1489,15 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16sub");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_s");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_u");
}
@@ -1502,9 +1506,9 @@ void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16mul");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_u");
}
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index dc6ce2f0b3..a88baa1146 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -87,13 +87,11 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
bailout(kUnsupportedArchitecture, "LoadConstant");
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
bailout(kUnsupportedArchitecture, "LoadFromInstance");
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
}
@@ -948,6 +946,12 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4_max_u");
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_dot_i16x8_s");
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i16x8splat");
@@ -1010,9 +1014,9 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8add");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_s");
}
@@ -1021,15 +1025,15 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8sub");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_s");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_u");
}
@@ -1038,9 +1042,9 @@ void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8mul");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_u");
}
@@ -1176,9 +1180,9 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16add");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_s");
}
@@ -1187,15 +1191,15 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16sub");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_s");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_u");
}
@@ -1204,9 +1208,9 @@ void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16mul");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_u");
}
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 713a1ce72a..a64b0e2e37 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -236,11 +236,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
- DCHECK_LE(offset, kMaxInt);
- movq(dst, liftoff::GetInstanceOperand());
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
+ DCHECK_LE(0, offset);
DCHECK(size == 4 || size == 8);
+ movq(dst, liftoff::GetInstanceOperand());
if (size == 4) {
movl(dst, Operand(dst, offset));
} else {
@@ -248,9 +247,8 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
+ DCHECK_LE(0, offset);
movq(dst, liftoff::GetInstanceOperand());
LoadTaggedPointerField(dst, Operand(dst, offset));
}
@@ -2232,11 +2230,11 @@ void EmitI64x2ShrS(LiftoffAssembler* assm, LiftoffRegister dst,
assm->Pextrq(tmp, lhs.fp(), int8_t{0x0});
assm->sarq_cl(tmp);
- assm->Pinsrq(dst.fp(), tmp, int8_t{0x0});
+ assm->Pinsrq(dst.fp(), tmp, uint8_t{0x0});
assm->Pextrq(tmp, lhs.fp(), int8_t{0x1});
assm->sarq_cl(tmp);
- assm->Pinsrq(dst.fp(), tmp, int8_t{0x1});
+ assm->Pinsrq(dst.fp(), tmp, uint8_t{0x1});
// restore rcx.
if (restore_rcx) {
@@ -2289,14 +2287,21 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
} else if (memtype == MachineType::Uint32()) {
Pmovzxdq(dst.fp(), src_op);
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ if (memtype == MachineType::Int32()) {
+ Movss(dst.fp(), src_op);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ Movsd(dst.fp(), src_op);
+ }
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
- Pinsrb(dst.fp(), src_op, 0);
+ Pinsrb(dst.fp(), dst.fp(), src_op, 0);
Pxor(kScratchDoubleReg, kScratchDoubleReg);
Pshufb(dst.fp(), kScratchDoubleReg);
} else if (memtype == MachineType::Int16()) {
- Pinsrw(dst.fp(), src_op, 0);
+ Pinsrw(dst.fp(), dst.fp(), src_op, 0);
Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
Punpcklqdq(dst.fp(), dst.fp());
} else if (memtype == MachineType::Int32()) {
@@ -2304,8 +2309,8 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
CpuFeatureScope avx_scope(this, AVX);
vbroadcastss(dst.fp(), src_op);
} else {
- Movss(dst.fp(), src_op);
- Shufps(dst.fp(), dst.fp(), byte{0});
+ movss(dst.fp(), src_op);
+ shufps(dst.fp(), dst.fp(), byte{0});
}
} else if (memtype == MachineType::Int64()) {
Movddup(dst.fp(), src_op);
@@ -2324,22 +2329,10 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
wasm::SimdShuffle::Pack16Lanes(imms, shuffle);
TurboAssembler::Move(kScratchDoubleReg, make_uint64(imms[3], imms[2]),
make_uint64(imms[1], imms[0]));
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpshufb(dst.fp(), lhs.fp(), kScratchDoubleReg);
- } else {
- if (dst != lhs) {
- movups(dst.fp(), lhs.fp());
- }
- pshufb(dst.fp(), kScratchDoubleReg);
- }
+ Pshufb(dst.fp(), lhs.fp(), kScratchDoubleReg);
return;
}
- LiftoffRegister tmp_simd =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs, rhs));
- Movups(kScratchDoubleReg, lhs.fp());
-
uint64_t mask1[2] = {};
for (int i = 15; i >= 0; i--) {
uint8_t lane = shuffle[i];
@@ -2347,10 +2340,8 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
mask1[j] <<= 8;
mask1[j] |= lane < kSimd128Size ? lane : 0x80;
}
- TurboAssembler::Move(tmp_simd.fp(), mask1[0]);
- movq(kScratchRegister, mask1[1]);
- Pinsrq(tmp_simd.fp(), kScratchRegister, int8_t{1});
- Pshufb(kScratchDoubleReg, tmp_simd.fp());
+ TurboAssembler::Move(liftoff::kScratchDoubleReg2, mask1[1], mask1[0]);
+ Pshufb(kScratchDoubleReg, lhs.fp(), liftoff::kScratchDoubleReg2);
uint64_t mask2[2] = {};
for (int i = 15; i >= 0; i--) {
@@ -2359,14 +2350,9 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
mask2[j] <<= 8;
mask2[j] |= lane >= kSimd128Size ? (lane & 0x0F) : 0x80;
}
- TurboAssembler::Move(tmp_simd.fp(), mask2[0]);
- movq(kScratchRegister, mask2[1]);
- Pinsrq(tmp_simd.fp(), kScratchRegister, int8_t{1});
+ TurboAssembler::Move(liftoff::kScratchDoubleReg2, mask2[1], mask2[0]);
- if (dst.fp() != rhs.fp()) {
- Movups(dst.fp(), rhs.fp());
- }
- Pshufb(dst.fp(), tmp_simd.fp());
+ Pshufb(dst.fp(), rhs.fp(), liftoff::kScratchDoubleReg2);
Por(dst.fp(), kScratchDoubleReg);
}
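The rewritten x64 i8x16.shuffle keeps the existing two-mask strategy but builds the second mask in a dedicated scratch register (liftoff::kScratchDoubleReg2) instead of a temp obtained from GetUnusedRegister. Conceptually: lanes selecting from lhs keep their index in mask1 and become 0x80 in mask2, and vice versa; pshufb zeroes any lane whose selector byte has the top bit set, and the two partial results are OR-ed together. A small scalar model of that trick, where Pshufb16 stands in for the pshufb instruction on 16 byte lanes:

#include <array>
#include <cstdint>

using Bytes16 = std::array<uint8_t, 16>;

// Scalar model of pshufb: a selector byte with the top bit set yields 0,
// otherwise the low 4 bits index into the source vector.
Bytes16 Pshufb16(const Bytes16& src, const Bytes16& mask) {
  Bytes16 out{};
  for (int i = 0; i < 16; ++i) {
    out[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 0x0F];
  }
  return out;
}

// shuffle[i] in [0,15] selects from lhs, in [16,31] selects from rhs.
Bytes16 I8x16Shuffle(const Bytes16& lhs, const Bytes16& rhs,
                     const std::array<uint8_t, 16>& shuffle) {
  Bytes16 mask1{}, mask2{};
  for (int i = 0; i < 16; ++i) {
    mask1[i] = shuffle[i] < 16 ? shuffle[i] : 0x80;             // take from lhs
    mask2[i] = shuffle[i] >= 16 ? (shuffle[i] & 0x0F) : 0x80;   // take from rhs
  }
  Bytes16 from_lhs = Pshufb16(lhs, mask1);   // kScratchDoubleReg in the patch
  Bytes16 from_rhs = Pshufb16(rhs, mask2);   // dst in the patch
  Bytes16 out{};
  for (int i = 0; i < 16; ++i) out[i] = from_lhs[i] | from_rhs[i];  // Por
  return out;
}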
@@ -2379,10 +2365,7 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
TurboAssembler::Move(mask, uint32_t{0x70707070});
Pshufd(mask, mask, uint8_t{0x0});
Paddusb(mask, rhs.fp());
- if (lhs != dst) {
- Movaps(dst.fp(), lhs.fp());
- }
- Pshufb(dst.fp(), mask);
+ Pshufb(dst.fp(), lhs.fp(), mask);
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
@@ -2413,10 +2396,7 @@ void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() != src.fp()) {
- Movss(dst.fp(), src.fp());
- }
- Shufps(dst.fp(), src.fp(), static_cast<byte>(0));
+ Shufps(dst.fp(), src.fp(), 0);
}
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
@@ -2659,7 +2639,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
memcpy(vals, imms, sizeof(vals));
TurboAssembler::Move(dst.fp(), vals[0]);
movq(kScratchRegister, vals[1]);
- Pinsrq(dst.fp(), kScratchRegister, int8_t{1});
+ Pinsrq(dst.fp(), kScratchRegister, uint8_t{1});
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
@@ -2827,16 +2807,16 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsb, &Assembler::paddsb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusb, &Assembler::paddusb>(
this, dst, lhs, rhs);
}
@@ -2847,16 +2827,16 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsb, &Assembler::psubsb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusb,
&Assembler::psubusb>(this, dst, lhs,
rhs);
@@ -3025,16 +3005,16 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsw, &Assembler::paddsw>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusw, &Assembler::paddusw>(
this, dst, lhs, rhs);
}
@@ -3045,16 +3025,16 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsw, &Assembler::psubsw>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusw,
&Assembler::psubusw>(this, dst, lhs,
rhs);
@@ -3204,6 +3184,13 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaddwd, &Assembler::pmaddwd>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
DoubleRegister reg = dst.fp() == src.fp() ? kScratchDoubleReg : dst.fp();
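The newly wired-up i32x4.dot_i16x8_s maps directly onto x64's pmaddwd/vpmaddwd: each 32-bit output lane is the sum of two adjacent signed 16x16 products. The mips64 MSA backend above uses dotp_s_w for the same operation, while the mips32, ppc and s390 backends still bail out. A scalar reference of the lane semantics, useful when reading those lowerings:

#include <array>
#include <cstdint>

// dst[i] = lhs[2*i] * rhs[2*i] + lhs[2*i+1] * rhs[2*i+1], all signed.
// Only the corner case where both pairs are -32768 * -32768 exceeds the
// int32 range; hardware pmaddwd wraps it to INT32_MIN, which the cast
// reproduces on two's-complement targets.
std::array<int32_t, 4> I32x4DotI16x8S(const std::array<int16_t, 8>& lhs,
                                      const std::array<int16_t, 8>& rhs) {
  std::array<int32_t, 4> dst{};
  for (int i = 0; i < 4; ++i) {
    int64_t sum = int64_t{lhs[2 * i]} * rhs[2 * i] +
                  int64_t{lhs[2 * i + 1]} * rhs[2 * i + 1];
    dst[i] = static_cast<int32_t>(sum);
  }
  return dst;
}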
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index 0bb6552943..f79833464d 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -209,8 +209,7 @@ auto seal(const typename implement<C>::type* x) -> const C* {
// Configuration
-struct ConfigImpl {
-};
+struct ConfigImpl {};
template <>
struct implement<Config> {
@@ -888,8 +887,8 @@ own<Instance> GetInstance(StoreImpl* store,
own<Frame> CreateFrameFromInternal(i::Handle<i::FixedArray> frames, int index,
i::Isolate* isolate, StoreImpl* store) {
- i::Handle<i::StackTraceFrame> frame(i::StackTraceFrame::cast(frames->get(0)),
- isolate);
+ i::Handle<i::StackTraceFrame> frame(
+ i::StackTraceFrame::cast(frames->get(index)), isolate);
i::Handle<i::WasmInstanceObject> instance =
i::StackTraceFrame::GetWasmInstance(frame);
uint32_t func_index = i::StackTraceFrame::GetWasmFunctionIndex(frame);
@@ -1511,7 +1510,8 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
auto store = func->store();
auto isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
- i::Object raw_function_data = func->v8_object()->shared().function_data();
+ i::Object raw_function_data =
+ func->v8_object()->shared().function_data(v8::kAcquireLoad);
// WasmCapiFunctions can be called directly.
if (raw_function_data.IsWasmCapiFunctionData()) {
@@ -1544,7 +1544,7 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
if (object_ref->IsTuple2()) {
i::JSFunction jsfunc =
i::JSFunction::cast(i::Tuple2::cast(*object_ref).value2());
- i::Object data = jsfunc.shared().function_data();
+ i::Object data = jsfunc.shared().function_data(v8::kAcquireLoad);
if (data.IsWasmCapiFunctionData()) {
return CallWasmCapiFunction(i::WasmCapiFunctionData::cast(data), args,
results);
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 86cec955b9..458b564313 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -13,7 +13,6 @@
#include "src/base/memory.h"
#include "src/codegen/signature.h"
#include "src/flags/flags.h"
-#include "src/utils/utils.h"
#include "src/utils/vector.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
@@ -39,9 +38,12 @@ using DecodeResult = VoidResult;
// a buffer of bytes.
class Decoder {
public:
- enum ValidateFlag : bool { kValidate = true, kNoValidate = false };
-
- enum AdvancePCFlag : bool { kAdvancePc = true, kNoAdvancePc = false };
+ // {ValidateFlag} can be used in a boolean manner ({if (!validate) ...}).
+ enum ValidateFlag : int8_t {
+ kNoValidation = 0, // Don't run validation, assume valid input.
+ kBooleanValidation, // Run validation but only store a generic error.
+ kFullValidation // Run full validation with error message and location.
+ };
enum TraceFlag : bool { kTrace = true, kNoTrace = false };
@@ -59,7 +61,7 @@ class Decoder {
virtual ~Decoder() = default;
- inline bool validate_size(const byte* pc, uint32_t length, const char* msg) {
+ bool validate_size(const byte* pc, uint32_t length, const char* msg) {
DCHECK_LE(start_, pc);
if (V8_UNLIKELY(pc > end_ || length > static_cast<uint32_t>(end_ - pc))) {
error(pc, msg);
@@ -70,28 +72,25 @@ class Decoder {
// Reads an 8-bit unsigned integer.
template <ValidateFlag validate>
- inline uint8_t read_u8(const byte* pc, const char* msg = "expected 1 byte") {
+ uint8_t read_u8(const byte* pc, const char* msg = "expected 1 byte") {
return read_little_endian<uint8_t, validate>(pc, msg);
}
// Reads a 16-bit unsigned integer (little endian).
template <ValidateFlag validate>
- inline uint16_t read_u16(const byte* pc,
- const char* msg = "expected 2 bytes") {
+ uint16_t read_u16(const byte* pc, const char* msg = "expected 2 bytes") {
return read_little_endian<uint16_t, validate>(pc, msg);
}
// Reads a 32-bit unsigned integer (little endian).
template <ValidateFlag validate>
- inline uint32_t read_u32(const byte* pc,
- const char* msg = "expected 4 bytes") {
+ uint32_t read_u32(const byte* pc, const char* msg = "expected 4 bytes") {
return read_little_endian<uint32_t, validate>(pc, msg);
}
// Reads a 64-bit unsigned integer (little endian).
template <ValidateFlag validate>
- inline uint64_t read_u64(const byte* pc,
- const char* msg = "expected 8 bytes") {
+ uint64_t read_u64(const byte* pc, const char* msg = "expected 8 bytes") {
return read_little_endian<uint64_t, validate>(pc, msg);
}
@@ -99,72 +98,64 @@ class Decoder {
template <ValidateFlag validate>
uint32_t read_u32v(const byte* pc, uint32_t* length,
const char* name = "LEB32") {
- return read_leb<uint32_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
- name);
+ return read_leb<uint32_t, validate, kNoTrace>(pc, length, name);
}
// Reads a variable-length signed integer (little endian).
template <ValidateFlag validate>
int32_t read_i32v(const byte* pc, uint32_t* length,
const char* name = "signed LEB32") {
- return read_leb<int32_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
- name);
+ return read_leb<int32_t, validate, kNoTrace>(pc, length, name);
}
// Reads a variable-length unsigned integer (little endian).
template <ValidateFlag validate>
uint64_t read_u64v(const byte* pc, uint32_t* length,
const char* name = "LEB64") {
- return read_leb<uint64_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
- name);
+ return read_leb<uint64_t, validate, kNoTrace>(pc, length, name);
}
// Reads a variable-length signed integer (little endian).
template <ValidateFlag validate>
int64_t read_i64v(const byte* pc, uint32_t* length,
const char* name = "signed LEB64") {
- return read_leb<int64_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
- name);
+ return read_leb<int64_t, validate, kNoTrace>(pc, length, name);
}
// Reads a variable-length 33-bit signed integer (little endian).
template <ValidateFlag validate>
int64_t read_i33v(const byte* pc, uint32_t* length,
const char* name = "signed LEB33") {
- return read_leb<int64_t, validate, kNoAdvancePc, kNoTrace, 33>(pc, length,
- name);
+ return read_leb<int64_t, validate, kNoTrace, 33>(pc, length, name);
+ }
+
+ // Convenient overload for callers who don't care about length.
+ template <ValidateFlag validate>
+ WasmOpcode read_prefixed_opcode(const byte* pc) {
+ uint32_t len;
+ return read_prefixed_opcode<validate>(pc, &len);
}
// Reads a prefixed-opcode, possibly with variable-length index.
- // The length param is set to the number of bytes this index is encoded with.
- // For most cases (non variable-length), it will be 1.
+ // `length` is set to the number of bytes that make up this opcode,
+ // *including* the prefix byte. For most opcodes, it will be 2.
template <ValidateFlag validate>
- WasmOpcode read_prefixed_opcode(const byte* pc, uint32_t* length = nullptr,
+ WasmOpcode read_prefixed_opcode(const byte* pc, uint32_t* length,
const char* name = "prefixed opcode") {
- uint32_t unused_length;
- if (length == nullptr) {
- length = &unused_length;
- }
uint32_t index;
- if (*pc == WasmOpcode::kSimdPrefix) {
- // SIMD opcodes can be multiple bytes (when LEB128 encoded).
- index = read_u32v<validate>(pc + 1, length, "prefixed opcode index");
- // Only support SIMD opcodes that go up to 0xFF (when decoded). Anything
- // bigger will need 1 more byte, and the '<< 8' below will be wrong.
- if (validate && V8_UNLIKELY(index > 0xff)) {
- errorf(pc, "Invalid SIMD opcode %d", index);
- }
- } else {
- if (!validate || validate_size(pc, 2, "expected 2 bytes")) {
- DCHECK(validate_size(pc, 2, "expected 2 bytes"));
- index = *(pc + 1);
- *length = 1;
- } else {
- // If kValidate and size validation fails.
- index = 0;
- *length = 0;
- }
+
+ // Prefixed opcodes all use LEB128 encoding.
+ index = read_u32v<validate>(pc + 1, length, "prefixed opcode index");
+ *length += 1; // Prefix byte.
+ // Only support opcodes that go up to 0xFF (when decoded). Anything
+ // bigger will need 1 more byte, and the '<< 8' below will be wrong.
+ if (validate && V8_UNLIKELY(index > 0xff)) {
+ errorf(pc, "Invalid prefixed opcode %d", index);
+ // If size validation fails.
+ index = 0;
+ *length = 0;
}
+
return static_cast<WasmOpcode>((*pc) << 8 | index);
}
@@ -186,21 +177,28 @@ class Decoder {
// Reads a LEB128 variable-length unsigned 32-bit integer and advances {pc_}.
uint32_t consume_u32v(const char* name = nullptr) {
uint32_t length = 0;
- return read_leb<uint32_t, kValidate, kAdvancePc, kTrace>(pc_, &length,
- name);
+ uint32_t result =
+ read_leb<uint32_t, kFullValidation, kTrace>(pc_, &length, name);
+ pc_ += length;
+ return result;
}
// Reads a LEB128 variable-length signed 32-bit integer and advances {pc_}.
int32_t consume_i32v(const char* name = nullptr) {
uint32_t length = 0;
- return read_leb<int32_t, kValidate, kAdvancePc, kTrace>(pc_, &length, name);
+ int32_t result =
+ read_leb<int32_t, kFullValidation, kTrace>(pc_, &length, name);
+ pc_ += length;
+ return result;
}
// Reads a LEB128 variable-length unsigned 64-bit integer and advances {pc_}.
uint64_t consume_u64v(const char* name = nullptr) {
uint32_t length = 0;
- return read_leb<uint64_t, kValidate, kAdvancePc, kTrace>(pc_, &length,
- name);
+ uint64_t result =
+ read_leb<uint64_t, kFullValidation, kTrace>(pc_, &length, name);
+ pc_ += length;
+ return result;
}
// Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
@@ -224,6 +222,14 @@ class Decoder {
return true;
}
+ // Use this for "boolean validation", i.e. if the error message is not used
+ // anyway.
+ void V8_NOINLINE MarkError() {
+ if (!ok()) return;
+ error_ = {0, "validation failed"};
+ onFirstError();
+ }
+
// Do not inline error methods. This has measurable impact on validation time,
// see https://crbug.com/910432.
void V8_NOINLINE error(const char* msg) { errorf(pc_offset(), "%s", msg); }
@@ -234,6 +240,13 @@ class Decoder {
errorf(offset, "%s", msg);
}
+ void V8_NOINLINE PRINTF_FORMAT(2, 3) errorf(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ verrorf(pc_offset(), format, args);
+ va_end(args);
+ }
+
void V8_NOINLINE PRINTF_FORMAT(3, 4)
errorf(uint32_t offset, const char* format, ...) {
va_list args;
@@ -343,8 +356,8 @@ class Decoder {
onFirstError();
}
- template <typename IntType, bool validate>
- inline IntType read_little_endian(const byte* pc, const char* msg) {
+ template <typename IntType, ValidateFlag validate>
+ IntType read_little_endian(const byte* pc, const char* msg) {
if (!validate) {
DCHECK(validate_size(pc, sizeof(IntType), msg));
} else if (!validate_size(pc, sizeof(IntType), msg)) {
@@ -354,36 +367,59 @@ class Decoder {
}
template <typename IntType>
- inline IntType consume_little_endian(const char* name) {
+ IntType consume_little_endian(const char* name) {
TRACE(" +%u %-20s: ", pc_offset(), name);
if (!checkAvailable(sizeof(IntType))) {
traceOffEnd();
pc_ = end_;
return IntType{0};
}
- IntType val = read_little_endian<IntType, false>(pc_, name);
+ IntType val = read_little_endian<IntType, kNoValidation>(pc_, name);
traceByteRange(pc_, pc_ + sizeof(IntType));
TRACE("= %d\n", val);
pc_ += sizeof(IntType);
return val;
}
- template <typename IntType, ValidateFlag validate, AdvancePCFlag advance_pc,
- TraceFlag trace, size_t size_in_bits = 8 * sizeof(IntType)>
- inline IntType read_leb(const byte* pc, uint32_t* length,
- const char* name = "varint") {
- DCHECK_IMPLIES(advance_pc, pc == pc_);
+ template <typename IntType, ValidateFlag validate, TraceFlag trace,
+ size_t size_in_bits = 8 * sizeof(IntType)>
+ V8_INLINE IntType read_leb(const byte* pc, uint32_t* length,
+ const char* name = "varint") {
static_assert(size_in_bits <= 8 * sizeof(IntType),
"leb does not fit in type");
TRACE_IF(trace, " +%u %-20s: ", pc_offset(), name);
- return read_leb_tail<IntType, validate, advance_pc, trace, size_in_bits, 0>(
- pc, length, name, 0);
+ // Fast path for single-byte integers.
+ if ((!validate || V8_LIKELY(pc < end_)) && !(*pc & 0x80)) {
+ TRACE_IF(trace, "%02x ", *pc);
+ *length = 1;
+ IntType result = *pc;
+ if (std::is_signed<IntType>::value) {
+ // Perform sign extension.
+ constexpr int sign_ext_shift = int{8 * sizeof(IntType)} - 7;
+ result = (result << sign_ext_shift) >> sign_ext_shift;
+ TRACE_IF(trace, "= %" PRIi64 "\n", static_cast<int64_t>(result));
+ } else {
+ TRACE_IF(trace, "= %" PRIu64 "\n", static_cast<uint64_t>(result));
+ }
+ return result;
+ }
+ return read_leb_slowpath<IntType, validate, trace, size_in_bits>(pc, length,
+ name);
+ }
+
+ template <typename IntType, ValidateFlag validate, TraceFlag trace,
+ size_t size_in_bits = 8 * sizeof(IntType)>
+ V8_NOINLINE IntType read_leb_slowpath(const byte* pc, uint32_t* length,
+ const char* name) {
+ // Create an unrolled LEB decoding function per integer type.
+ return read_leb_tail<IntType, validate, trace, size_in_bits, 0>(pc, length,
+ name, 0);
}
- template <typename IntType, ValidateFlag validate, AdvancePCFlag advance_pc,
- TraceFlag trace, size_t size_in_bits, int byte_index>
- IntType read_leb_tail(const byte* pc, uint32_t* length, const char* name,
- IntType result) {
+ template <typename IntType, ValidateFlag validate, TraceFlag trace,
+ size_t size_in_bits, int byte_index>
+ V8_INLINE IntType read_leb_tail(const byte* pc, uint32_t* length,
+ const char* name, IntType result) {
constexpr bool is_signed = std::is_signed<IntType>::value;
constexpr int kMaxLength = (size_in_bits + 6) / 7;
static_assert(byte_index < kMaxLength, "invalid template instantiation");
@@ -404,15 +440,19 @@ class Decoder {
// Compilers are not smart enough to figure out statically that the
// following call is unreachable if is_last_byte is false.
constexpr int next_byte_index = byte_index + (is_last_byte ? 0 : 1);
- return read_leb_tail<IntType, validate, advance_pc, trace, size_in_bits,
+ return read_leb_tail<IntType, validate, trace, size_in_bits,
next_byte_index>(pc + 1, length, name, result);
}
- if (advance_pc) pc_ = pc + (at_end ? 0 : 1);
*length = byte_index + (at_end ? 0 : 1);
if (validate && V8_UNLIKELY(at_end || (b & 0x80))) {
TRACE_IF(trace, at_end ? "<end> " : "<length overflow> ");
- errorf(pc, "expected %s", name);
+ if (validate == kFullValidation) {
+ errorf(pc, "expected %s", name);
+ } else {
+ MarkError();
+ }
result = 0;
+ *length = 0;
}
if (is_last_byte) {
// A signed-LEB128 must sign-extend the final byte, excluding its
@@ -431,12 +471,17 @@ class Decoder {
if (!validate) {
DCHECK(valid_extra_bits);
} else if (V8_UNLIKELY(!valid_extra_bits)) {
- error(pc, "extra bits in varint");
+ if (validate == kFullValidation) {
+ error(pc, "extra bits in varint");
+ } else {
+ MarkError();
+ }
result = 0;
+ *length = 0;
}
}
constexpr int sign_ext_shift =
- is_signed ? Max(0, int{8 * sizeof(IntType)} - shift - 7) : 0;
+ is_signed ? std::max(0, int{8 * sizeof(IntType)} - shift - 7) : 0;
// Perform sign extension.
result = (result << sign_ext_shift) >> sign_ext_shift;
if (trace && is_signed) {
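The decoder refactoring splits validation into kNoValidation / kBooleanValidation / kFullValidation, drops the kAdvancePc template flag (callers such as consume_u32v now add the returned length themselves), and adds a single-byte fast path to read_leb. The sign-extension trick in that fast path is worth spelling out: for a one-byte signed LEB value the payload sits in bits 0..6, so shifting left by 8*sizeof(IntType)-7 and arithmetically shifting back copies bit 6 into all upper bits. A standalone sketch for int32_t; the patch does the shift on the signed type directly, the unsigned detour here just keeps the left shift well-defined:

#include <cstdint>

// One-byte signed LEB128 decode, matching the new read_leb fast path: the
// byte has the continuation bit (0x80) clear, and bit 6 is the payload's
// sign bit.
int32_t DecodeOneByteSignedLeb(uint8_t byte) {
  constexpr int kSignExtShift = 8 * sizeof(int32_t) - 7;  // 32 - 7 == 25
  uint32_t shifted = uint32_t{byte} << kSignExtShift;
  return static_cast<int32_t>(shifted) >> kSignExtShift;  // arithmetic shift
}

// Examples: 0x3F -> 63, 0x40 -> -64, 0x7F -> -1.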
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 42b36f359b..3e07806d89 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -44,15 +44,14 @@ struct WasmException;
return true; \
}())
-#define CHECK_PROTOTYPE_OPCODE(feat) \
- DCHECK(this->module_->origin == kWasmOrigin); \
- if (!VALIDATE(this->enabled_.has_##feat())) { \
- this->errorf(this->pc(), \
- "Invalid opcode 0x%x (enable with --experimental-wasm-" #feat \
- ")", \
- opcode); \
- return 0; \
- } \
+#define CHECK_PROTOTYPE_OPCODE(feat) \
+ DCHECK(this->module_->origin == kWasmOrigin); \
+ if (!VALIDATE(this->enabled_.has_##feat())) { \
+ this->DecodeError( \
+ "Invalid opcode 0x%x (enable with --experimental-wasm-" #feat ")", \
+ opcode); \
+ return 0; \
+ } \
this->detected_->Add(kFeature_##feat);
#define ATOMIC_OP_LIST(V) \
@@ -125,6 +124,57 @@ struct WasmException;
V(I64AtomicStore16U, Uint16) \
V(I64AtomicStore32U, Uint32)
+// Decoder error with explicit PC and format arguments.
+template <Decoder::ValidateFlag validate, typename... Args>
+void DecodeError(Decoder* decoder, const byte* pc, const char* str,
+ Args&&... args) {
+ CHECK(validate == Decoder::kFullValidation ||
+ validate == Decoder::kBooleanValidation);
+ STATIC_ASSERT(sizeof...(Args) > 0);
+ if (validate == Decoder::kBooleanValidation) {
+ decoder->MarkError();
+ } else {
+ decoder->errorf(pc, str, std::forward<Args>(args)...);
+ }
+}
+
+// Decoder error with explicit PC and no format arguments.
+template <Decoder::ValidateFlag validate>
+void DecodeError(Decoder* decoder, const byte* pc, const char* str) {
+ CHECK(validate == Decoder::kFullValidation ||
+ validate == Decoder::kBooleanValidation);
+ if (validate == Decoder::kBooleanValidation) {
+ decoder->MarkError();
+ } else {
+ decoder->error(pc, str);
+ }
+}
+
+// Decoder error without explicit PC, but with format arguments.
+template <Decoder::ValidateFlag validate, typename... Args>
+void DecodeError(Decoder* decoder, const char* str, Args&&... args) {
+ CHECK(validate == Decoder::kFullValidation ||
+ validate == Decoder::kBooleanValidation);
+ STATIC_ASSERT(sizeof...(Args) > 0);
+ if (validate == Decoder::kBooleanValidation) {
+ decoder->MarkError();
+ } else {
+ decoder->errorf(str, std::forward<Args>(args)...);
+ }
+}
+
+// Decoder error without explicit PC and without format arguments.
+template <Decoder::ValidateFlag validate>
+void DecodeError(Decoder* decoder, const char* str) {
+ CHECK(validate == Decoder::kFullValidation ||
+ validate == Decoder::kBooleanValidation);
+ if (validate == Decoder::kBooleanValidation) {
+ decoder->MarkError();
+ } else {
+ decoder->error(str);
+ }
+}
+
namespace value_type_reader {
V8_INLINE WasmFeature feature_for_heap_type(HeapType heap_type) {
@@ -147,6 +197,12 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
uint32_t* const length, const WasmFeatures& enabled) {
int64_t heap_index = decoder->read_i33v<validate>(pc, length, "heap type");
if (heap_index < 0) {
+ int64_t min_1_byte_leb128 = -64;
+ if (heap_index < min_1_byte_leb128) {
+ DecodeError<validate>(decoder, pc, "Unknown heap type %" PRId64,
+ heap_index);
+ return HeapType(HeapType::kBottom);
+ }
uint8_t uint_7_mask = 0x7F;
uint8_t code = static_cast<ValueTypeCode>(heap_index) & uint_7_mask;
switch (code) {
@@ -157,8 +213,9 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
case kI31RefCode: {
HeapType result = HeapType::from_code(code);
if (!VALIDATE(enabled.contains(feature_for_heap_type(result)))) {
- decoder->errorf(
- pc, "invalid heap type '%s', enable with --experimental-wasm-%s",
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid heap type '%s', enable with --experimental-wasm-%s",
result.name().c_str(),
WasmFeatures::name_for_feature(feature_for_heap_type(result)));
return HeapType(HeapType::kBottom);
@@ -166,25 +223,25 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
return result;
}
default:
- if (validate) {
- decoder->errorf(pc, "Unknown heap type %" PRId64, heap_index);
- }
+ DecodeError<validate>(decoder, pc, "Unknown heap type %" PRId64,
+ heap_index);
return HeapType(HeapType::kBottom);
}
UNREACHABLE();
} else {
if (!VALIDATE(enabled.has_typed_funcref())) {
- decoder->error(pc,
- "Invalid indexed heap type, enable with "
- "--experimental-wasm-typed-funcref");
+ DecodeError<validate>(decoder, pc,
+ "Invalid indexed heap type, enable with "
+ "--experimental-wasm-typed-funcref");
return HeapType(HeapType::kBottom);
}
uint32_t type_index = static_cast<uint32_t>(heap_index);
if (!VALIDATE(type_index < kV8MaxWasmTypes)) {
- decoder->errorf(pc,
- "Type index %u is greater than the maximum number %zu "
- "of type definitions supported by V8",
- type_index, kV8MaxWasmTypes);
+ DecodeError<validate>(
+ decoder, pc,
+ "Type index %u is greater than the maximum number %zu "
+ "of type definitions supported by V8",
+ type_index, kV8MaxWasmTypes);
return HeapType(HeapType::kBottom);
}
return HeapType(type_index);
@@ -214,8 +271,9 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
ValueType result = ValueType::Ref(
heap_type, code == kI31RefCode ? kNonNullable : kNullable);
if (!VALIDATE(enabled.contains(feature_for_heap_type(heap_type)))) {
- decoder->errorf(
- pc, "invalid value type '%s', enable with --experimental-wasm-%s",
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid value type '%s', enable with --experimental-wasm-%s",
result.name().c_str(),
WasmFeatures::name_for_feature(feature_for_heap_type(heap_type)));
return kWasmBottom;
@@ -234,10 +292,10 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
case kOptRefCode: {
Nullability nullability = code == kOptRefCode ? kNullable : kNonNullable;
if (!VALIDATE(enabled.has_typed_funcref())) {
- decoder->errorf(pc,
- "Invalid type '(ref%s <heaptype>)', enable with "
- "--experimental-wasm-typed-funcref",
- nullability == kNullable ? " null" : "");
+ DecodeError<validate>(decoder, pc,
+ "Invalid type '(ref%s <heaptype>)', enable with "
+ "--experimental-wasm-typed-funcref",
+ nullability == kNullable ? " null" : "");
return kWasmBottom;
}
HeapType heap_type =
@@ -248,18 +306,20 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
}
case kRttCode: {
if (!VALIDATE(enabled.has_gc())) {
- decoder->error(
- pc, "invalid value type 'rtt', enable with --experimental-wasm-gc");
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid value type 'rtt', enable with --experimental-wasm-gc");
return kWasmBottom;
}
uint32_t depth_length;
uint32_t depth =
decoder->read_u32v<validate>(pc + 1, &depth_length, "depth");
if (!VALIDATE(depth <= kV8MaxRttSubtypingDepth)) {
- decoder->errorf(pc,
- "subtyping depth %u is greater than the maximum depth "
- "%u supported by V8",
- depth, kV8MaxRttSubtypingDepth);
+ DecodeError<validate>(
+ decoder, pc,
+ "subtyping depth %u is greater than the maximum depth "
+ "%u supported by V8",
+ depth, kV8MaxRttSubtypingDepth);
return kWasmBottom;
}
HeapType heap_type = read_heap_type<validate>(
@@ -270,9 +330,9 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
}
case kS128Code: {
if (!VALIDATE(enabled.has_simd())) {
- decoder->error(pc,
- "invalid value type 's128', enable with "
- "--experimental-wasm-simd");
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid value type 's128', enable with --experimental-wasm-simd");
return kWasmBottom;
}
return kWasmS128;
@@ -376,8 +436,9 @@ struct SelectTypeImmediate {
uint8_t num_types =
decoder->read_u32v<validate>(pc, &length, "number of select types");
if (!VALIDATE(num_types == 1)) {
- decoder->error(
- pc + 1, "Invalid number of types. Select accepts exactly one type");
+ DecodeError<validate>(
+ decoder, pc + 1,
+ "Invalid number of types. Select accepts exactly one type");
return;
}
uint32_t type_length;
@@ -385,7 +446,7 @@ struct SelectTypeImmediate {
&type_length, enabled);
length += type_length;
if (!VALIDATE(type != kWasmBottom)) {
- decoder->error(pc + 1, "invalid select type");
+ DecodeError<validate>(decoder, pc + 1, "invalid select type");
}
}
};
@@ -402,18 +463,20 @@ struct BlockTypeImmediate {
int64_t block_type =
decoder->read_i33v<validate>(pc, &length, "block type");
if (block_type < 0) {
- if ((static_cast<uint8_t>(block_type) & byte{0x7f}) == kVoidCode) return;
+ constexpr int64_t kVoidCode_i64_extended = (~int64_t{0x7F}) | kVoidCode;
+ if (block_type == kVoidCode_i64_extended) return;
type = value_type_reader::read_value_type<validate>(decoder, pc, &length,
enabled);
if (!VALIDATE(type != kWasmBottom)) {
- decoder->errorf(pc, "Invalid block type %" PRId64, block_type);
+ DecodeError<validate>(decoder, pc, "Invalid block type %" PRId64,
+ block_type);
}
} else {
if (!VALIDATE(enabled.has_mv())) {
- decoder->errorf(pc,
- "invalid block type %" PRId64
- ", enable with --experimental-wasm-mv",
- block_type);
+ DecodeError<validate>(decoder, pc,
+ "invalid block type %" PRId64
+ ", enable with --experimental-wasm-mv",
+ block_type);
return;
}
type = kWasmBottom;
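The old check masked the low seven bits of the decoded block type against kVoidCode; the new one compares the full sign-extended i33 value against kVoidCode_i64_extended = (~int64_t{0x7F}) | kVoidCode. The two agree for the canonical one-byte encoding, because read_i33v sign-extends bit 6 of a single-byte value. A small check of that identity, assuming kVoidCode is the usual Wasm empty-block-type byte 0x40:

#include <cstdint>

constexpr int64_t kVoidCode = 0x40;  // assumed: Wasm's empty-block-type byte
// What read_i33v yields for the single byte 0x40 (payload sign bit 6 is set):
constexpr int64_t kDecodedVoid = 0x40 - 0x80;  // == -64
constexpr int64_t kVoidCodeExtended = (~int64_t{0x7F}) | kVoidCode;
static_assert(kDecodedVoid == kVoidCodeExtended, "full-value compare matches");
// ...and the old low-7-bit mask agrees for this one-byte encoding:
static_assert((static_cast<uint8_t>(kDecodedVoid) & 0x7F) == kVoidCode,
              "byte-mask check agrees");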
@@ -480,7 +543,8 @@ struct MemoryIndexImmediate {
inline MemoryIndexImmediate(Decoder* decoder, const byte* pc) {
index = decoder->read_u8<validate>(pc, "memory index");
if (!VALIDATE(index == 0)) {
- decoder->errorf(pc, "expected memory index 0, found %u", index);
+ DecodeError<validate>(decoder, pc, "expected memory index 0, found %u",
+ index);
}
}
};
@@ -543,8 +607,8 @@ struct CallIndirectImmediate {
TableIndexImmediate<validate> table(decoder, pc + len);
if (!VALIDATE((table.index == 0 && table.length == 1) ||
enabled.has_reftypes())) {
- decoder->errorf(pc + len, "expected table index 0, found %u",
- table.index);
+ DecodeError<validate>(decoder, pc + len,
+ "expected table index 0, found %u", table.index);
}
table_index = table.index;
length = len + table.length;
@@ -623,10 +687,11 @@ struct MemoryAccessImmediate {
alignment =
decoder->read_u32v<validate>(pc, &alignment_length, "alignment");
if (!VALIDATE(alignment <= max_alignment)) {
- decoder->errorf(pc,
- "invalid alignment; expected maximum alignment is %u, "
- "actual alignment is %u",
- max_alignment, alignment);
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid alignment; expected maximum alignment is %u, "
+ "actual alignment is %u",
+ max_alignment, alignment);
}
uint32_t offset_length;
offset = decoder->read_u32v<validate>(pc + alignment_length, &offset_length,
@@ -746,12 +811,29 @@ struct HeapTypeImmediate {
}
};
+template <Decoder::ValidateFlag validate>
+struct PcForErrors {
+ PcForErrors(const byte* /* pc */) {}
+
+ const byte* pc() const { return nullptr; }
+};
+
+template <>
+struct PcForErrors<Decoder::kFullValidation> {
+ const byte* pc_for_errors = nullptr;
+
+ PcForErrors(const byte* pc) : pc_for_errors(pc) {}
+
+ const byte* pc() const { return pc_for_errors; }
+};
+
// An entry on the value stack.
-struct ValueBase {
- const byte* pc = nullptr;
+template <Decoder::ValidateFlag validate>
+struct ValueBase : public PcForErrors<validate> {
ValueType type = kWasmStmt;
- ValueBase(const byte* pc, ValueType type) : pc(pc), type(type) {}
+ ValueBase(const byte* pc, ValueType type)
+ : PcForErrors<validate>(pc), type(type) {}
};
template <typename Value>
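PcForErrors is specialized so that only the kFullValidation decoder keeps a per-value error pc; for the other modes it is an empty base, so ValueBase and ControlBase shrink by one pointer. A minimal illustration of the pattern, using stand-in names rather than the V8 ones:

#include <cstdint>
#include <cstdio>

enum Validate { kNone, kBoolean, kFull };  // stand-in for Decoder::ValidateFlag

template <Validate validate>
struct PcHolder {  // empty in the cheap modes
  explicit PcHolder(const uint8_t*) {}
  const uint8_t* pc() const { return nullptr; }
};

template <>
struct PcHolder<kFull> {  // only full validation pays for the pointer
  const uint8_t* pc_for_errors;
  explicit PcHolder(const uint8_t* pc) : pc_for_errors(pc) {}
  const uint8_t* pc() const { return pc_for_errors; }
};

template <Validate validate>
struct StackValue : PcHolder<validate> {
  int type = 0;
  StackValue(const uint8_t* pc, int t) : PcHolder<validate>(pc), type(t) {}
};

int main() {
  // With the empty-base optimization, the non-full value carries no pc at all.
  std::printf("full: %zu bytes, boolean: %zu bytes\n",
              sizeof(StackValue<kFull>), sizeof(StackValue<kBoolean>));
}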
@@ -794,12 +876,11 @@ enum Reachability : uint8_t {
};
// An entry on the control stack (i.e. if, block, loop, or try).
-template <typename Value>
-struct ControlBase {
+template <typename Value, Decoder::ValidateFlag validate>
+struct ControlBase : public PcForErrors<validate> {
ControlKind kind = kControlBlock;
uint32_t locals_count = 0;
uint32_t stack_depth = 0; // stack height at the beginning of the construct.
- const uint8_t* pc = nullptr;
Reachability reachability = kReachable;
// Values merged into the start or end of this control construct.
@@ -810,10 +891,10 @@ struct ControlBase {
ControlBase(ControlKind kind, uint32_t locals_count, uint32_t stack_depth,
const uint8_t* pc, Reachability reachability)
- : kind(kind),
+ : PcForErrors<validate>(pc),
+ kind(kind),
locals_count(locals_count),
stack_depth(stack_depth),
- pc(pc),
reachability(reachability),
start_merge(reachability == kReachable) {
DCHECK(kind == kControlLet || locals_count == 0);
@@ -904,8 +985,13 @@ struct ControlBase {
F(LoadTransform, LoadType type, LoadTransformationKind transform, \
const MemoryAccessImmediate<validate>& imm, const Value& index, \
Value* result) \
+ F(LoadLane, LoadType type, const Value& value, const Value& index, \
+ const MemoryAccessImmediate<validate>& imm, const uint8_t laneidx, \
+ Value* result) \
F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
const Value& index, const Value& value) \
+ F(StoreLane, StoreType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, const Value& value, const uint8_t laneidx) \
F(CurrentMemoryPages, Value* result) \
F(MemoryGrow, const Value& value, Value* result) \
F(CallDirect, const CallFunctionImmediate<validate>& imm, \
@@ -1035,9 +1121,10 @@ class WasmDecoder : public Decoder {
: local_types_.begin();
// Decode local declarations, if any.
- uint32_t entries = read_u32v<kValidate>(pc, &length, "local decls count");
+ uint32_t entries =
+ read_u32v<kFullValidation>(pc, &length, "local decls count");
if (!VALIDATE(ok())) {
- error(pc + *total_length, "invalid local decls count");
+ DecodeError(pc + *total_length, "invalid local decls count");
return false;
}
@@ -1046,26 +1133,27 @@ class WasmDecoder : public Decoder {
while (entries-- > 0) {
if (!VALIDATE(more())) {
- error(end(), "expected more local decls but reached end of input");
+ DecodeError(end(),
+ "expected more local decls but reached end of input");
return false;
}
- uint32_t count =
- read_u32v<kValidate>(pc + *total_length, &length, "local count");
+ uint32_t count = read_u32v<kFullValidation>(pc + *total_length, &length,
+ "local count");
if (!VALIDATE(ok())) {
- error(pc + *total_length, "invalid local count");
+ DecodeError(pc + *total_length, "invalid local count");
return false;
}
DCHECK_LE(local_types_.size(), kV8MaxWasmFunctionLocals);
if (!VALIDATE(count <= kV8MaxWasmFunctionLocals - local_types_.size())) {
- error(pc + *total_length, "local count too large");
+ DecodeError(pc + *total_length, "local count too large");
return false;
}
*total_length += length;
- ValueType type = value_type_reader::read_value_type<kValidate>(
+ ValueType type = value_type_reader::read_value_type<kFullValidation>(
this, pc + *total_length, &length, enabled_);
if (!VALIDATE(type != kWasmBottom)) {
- error(pc + *total_length, "invalid local type");
+ DecodeError(pc + *total_length, "invalid local type");
return false;
}
*total_length += length;
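For orientation, the locals prologue that DecodeLocals walks above is an LEB128 entry count followed by (count, value-type) pairs, with the running total capped (kV8MaxWasmFunctionLocals in this file). A self-contained sketch of that wire format, using toy types instead of V8's reader; the error returns correspond to the messages in the hunk:

    #include <cstdint>
    #include <optional>
    #include <vector>

    struct LocalEntry {
      uint32_t count;
      uint8_t type;  // raw value-type byte; a real decoder validates it
    };

    std::optional<std::vector<LocalEntry>> ReadLocalDecls(const uint8_t* pc,
                                                          const uint8_t* end,
                                                          uint32_t max_locals) {
      auto read_leb = [&](uint32_t* out) -> bool {
        uint32_t value = 0;
        int shift = 0;
        while (pc < end && shift < 35) {
          uint8_t b = *pc++;
          value |= static_cast<uint32_t>(b & 0x7F) << shift;
          if ((b & 0x80) == 0) {
            *out = value;
            return true;
          }
          shift += 7;
        }
        return false;  // ran off the end or over-long encoding
      };
      uint32_t entries;
      if (!read_leb(&entries)) return std::nullopt;   // invalid local decls count
      std::vector<LocalEntry> decls;
      uint64_t total = 0;
      while (entries-- > 0) {
        uint32_t count;
        if (!read_leb(&count)) return std::nullopt;   // invalid local count
        total += count;
        if (total > max_locals) return std::nullopt;  // local count too large
        if (pc >= end) return std::nullopt;           // missing value type byte
        decls.push_back({count, *pc++});
      }
      return decls;
    }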
@@ -1081,6 +1169,13 @@ class WasmDecoder : public Decoder {
return true;
}
+ // Shorthand that forwards to the {DecodeError} functions above, passing our
+ // {validate} flag.
+ template <typename... Args>
+ void DecodeError(Args... args) {
+ wasm::DecodeError<validate>(this, std::forward<Args>(args)...);
+ }
+
static BitVector* AnalyzeLoopAssignment(WasmDecoder* decoder, const byte* pc,
uint32_t locals_count, Zone* zone) {
if (pc >= decoder->end()) return nullptr;
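The DecodeError shorthand introduced above passes the decoder's own validate flag to the free DecodeError functions, so (presumably) the formatted message is only built under full validation while the other configurations merely flag failure. A hedged sketch of that dispatch using a hypothetical ToyDecoder, not the real Decoder API:

    #include <cstdio>
    #include <string>

    enum ValidateFlag { kNoValidation, kBooleanValidation, kFullValidation };

    struct ToyDecoder {
      bool ok = true;
      std::string message;
    };

    // Arguments are assumed to be printf-compatible scalars/strings.
    template <ValidateFlag validate, typename... Args>
    void DecodeError(ToyDecoder* decoder, const char* format, Args... args) {
      decoder->ok = false;                      // every flavour marks failure
      if (validate != kFullValidation) return;  // cheap path: no formatting
      char buf[256];
      snprintf(buf, sizeof(buf), format, args...);
      decoder->message = buf;
    }

    // Usage mirroring the call sites in this file:
    //   DecodeError<kFullValidation>(&decoder, "invalid local index: %u", index);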
@@ -1138,7 +1233,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, LocalIndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < num_locals())) {
- errorf(pc, "invalid local index: %u", imm.index);
+ DecodeError(pc, "invalid local index: %u", imm.index);
return false;
}
return true;
@@ -1152,7 +1247,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
if (!Complete(imm)) {
- errorf(pc, "Invalid exception index: %u", imm.index);
+ DecodeError(pc, "Invalid exception index: %u", imm.index);
return false;
}
return true;
@@ -1160,7 +1255,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, GlobalIndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->globals.size())) {
- errorf(pc, "invalid global index: %u", imm.index);
+ DecodeError(pc, "invalid global index: %u", imm.index);
return false;
}
imm.global = &module_->globals[imm.index];
@@ -1176,15 +1271,15 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, StructIndexImmediate<validate>& imm) {
if (Complete(imm)) return true;
- errorf(pc, "invalid struct index: %u", imm.index);
+ DecodeError(pc, "invalid struct index: %u", imm.index);
return false;
}
inline bool Validate(const byte* pc, FieldIndexImmediate<validate>& imm) {
if (!Validate(pc, imm.struct_index)) return false;
if (!VALIDATE(imm.index < imm.struct_index.struct_type->field_count())) {
- errorf(pc + imm.struct_index.length, "invalid field index: %u",
- imm.index);
+ DecodeError(pc + imm.struct_index.length, "invalid field index: %u",
+ imm.index);
return false;
}
return true;
@@ -1198,7 +1293,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, ArrayIndexImmediate<validate>& imm) {
if (!Complete(imm)) {
- errorf(pc, "invalid array index: %u", imm.index);
+ DecodeError(pc, "invalid array index: %u", imm.index);
return false;
}
return true;
@@ -1225,7 +1320,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, CallFunctionImmediate<validate>& imm) {
if (!Complete(imm)) {
- errorf(pc, "invalid function index: %u", imm.index);
+ DecodeError(pc, "invalid function index: %u", imm.index);
return false;
}
return true;
@@ -1242,27 +1337,28 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, CallIndirectImmediate<validate>& imm) {
if (!VALIDATE(imm.table_index < module_->tables.size())) {
- error("call_indirect: table index immediate out of bounds");
+ DecodeError(pc, "call_indirect: table index immediate out of bounds");
return false;
}
ValueType table_type = module_->tables[imm.table_index].type;
if (!VALIDATE(IsSubtypeOf(table_type, kWasmFuncRef, module_))) {
- errorf(pc, "call_indirect: immediate table #%u is not of a function type",
- imm.table_index);
+ DecodeError(
+ pc, "call_indirect: immediate table #%u is not of a function type",
+ imm.table_index);
return false;
}
if (!Complete(imm)) {
- errorf(pc, "invalid signature index: #%u", imm.sig_index);
+ DecodeError(pc, "invalid signature index: #%u", imm.sig_index);
return false;
}
// Check that the dynamic signature for this call is a subtype of the static
// type of the table the function is defined in.
ValueType immediate_type = ValueType::Ref(imm.sig_index, kNonNullable);
if (!VALIDATE(IsSubtypeOf(immediate_type, table_type, module_))) {
- errorf(pc,
- "call_indirect: Immediate signature #%u is not a subtype of "
- "immediate table #%u",
- imm.sig_index, imm.table_index);
+ DecodeError(pc,
+ "call_indirect: Immediate signature #%u is not a subtype of "
+ "immediate table #%u",
+ imm.sig_index, imm.table_index);
}
return true;
}
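The call_indirect validation above enforces two subtype relations: the table's element type must be a subtype of funcref, and the (ref sig_index) type of the signature immediate must be a subtype of the table's element type. A simplified sketch of the second check, assuming a toy model with only generic funcref tables and exact signature references (real wasm typing is richer):

    #include <cstdint>

    struct ToyTableType {
      bool is_generic_funcref;  // table declared as plain funcref
      uint32_t sig_index;       // meaningful only when !is_generic_funcref
    };

    // call_indirect with type immediate call_sig_index is allowed on a table
    // only if (ref call_sig_index) <: the table's element type.
    bool SignatureAllowedInTable(uint32_t call_sig_index, ToyTableType table) {
      // funcref is a supertype of every (ref $sig); a concretely typed table
      // accepts only that exact signature in this simplified model.
      return table.is_generic_funcref || table.sig_index == call_sig_index;
    }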
@@ -1270,7 +1366,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, BranchDepthImmediate<validate>& imm,
size_t control_depth) {
if (!VALIDATE(imm.depth < control_depth)) {
- errorf(pc, "invalid branch depth: %u", imm.depth);
+ DecodeError(pc, "invalid branch depth: %u", imm.depth);
return false;
}
return true;
@@ -1279,8 +1375,8 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, BranchTableImmediate<validate>& imm,
size_t block_depth) {
if (!VALIDATE(imm.table_count <= kV8MaxWasmFunctionBrTableSize)) {
- errorf(pc, "invalid table count (> max br_table size): %u",
- imm.table_count);
+ DecodeError(pc, "invalid table count (> max br_table size): %u",
+ imm.table_count);
return false;
}
return checkAvailable(imm.table_count);
@@ -1324,7 +1420,7 @@ class WasmDecoder : public Decoder {
break;
}
if (!VALIDATE(imm.lane >= 0 && imm.lane < num_lanes)) {
- error(pc, "invalid lane index");
+ DecodeError(pc, "invalid lane index");
return false;
} else {
return true;
@@ -1338,7 +1434,7 @@ class WasmDecoder : public Decoder {
}
// Shuffle indices must be in [0..31] for a 16 lane shuffle.
if (!VALIDATE(max_lane < 2 * kSimd128Size)) {
- error(pc, "invalid shuffle mask");
+ DecodeError(pc, "invalid shuffle mask");
return false;
}
return true;
@@ -1356,8 +1452,8 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, BlockTypeImmediate<validate>& imm) {
if (!Complete(imm)) {
- errorf(pc, "block type index %u out of bounds (%zu types)", imm.sig_index,
- module_->types.size());
+ DecodeError(pc, "block type index %u out of bounds (%zu types)",
+ imm.sig_index, module_->types.size());
return false;
}
return true;
@@ -1365,11 +1461,11 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, FunctionIndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->functions.size())) {
- errorf(pc, "invalid function index: %u", imm.index);
+ DecodeError(pc, "invalid function index: %u", imm.index);
return false;
}
if (!VALIDATE(module_->functions[imm.index].declared)) {
- this->errorf(pc, "undeclared reference to function #%u", imm.index);
+ DecodeError(pc, "undeclared reference to function #%u", imm.index);
return false;
}
return true;
@@ -1377,7 +1473,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, MemoryIndexImmediate<validate>& imm) {
if (!VALIDATE(module_->has_memory)) {
- errorf(pc, "memory instruction with no memory");
+ DecodeError(pc, "memory instruction with no memory");
return false;
}
return true;
@@ -1386,7 +1482,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, MemoryInitImmediate<validate>& imm) {
if (!VALIDATE(imm.data_segment_index <
module_->num_declared_data_segments)) {
- errorf(pc, "invalid data segment index: %u", imm.data_segment_index);
+ DecodeError(pc, "invalid data segment index: %u", imm.data_segment_index);
return false;
}
if (!Validate(pc + imm.length - imm.memory.length, imm.memory))
@@ -1396,7 +1492,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, DataDropImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->num_declared_data_segments)) {
- errorf(pc, "invalid data segment index: %u", imm.index);
+ DecodeError(pc, "invalid data segment index: %u", imm.index);
return false;
}
return true;
@@ -1409,7 +1505,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, TableIndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->tables.size())) {
- errorf(pc, "invalid table index: %u", imm.index);
+ DecodeError(pc, "invalid table index: %u", imm.index);
return false;
}
return true;
@@ -1417,7 +1513,8 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, TableInitImmediate<validate>& imm) {
if (!VALIDATE(imm.elem_segment_index < module_->elem_segments.size())) {
- errorf(pc, "invalid element segment index: %u", imm.elem_segment_index);
+ DecodeError(pc, "invalid element segment index: %u",
+ imm.elem_segment_index);
return false;
}
if (!Validate(pc + imm.length - imm.table.length, imm.table)) {
@@ -1426,8 +1523,8 @@ class WasmDecoder : public Decoder {
ValueType elem_type = module_->elem_segments[imm.elem_segment_index].type;
if (!VALIDATE(IsSubtypeOf(elem_type, module_->tables[imm.table.index].type,
module_))) {
- errorf(pc, "table %u is not a super-type of %s", imm.table.index,
- elem_type.name().c_str());
+ DecodeError(pc, "table %u is not a super-type of %s", imm.table.index,
+ elem_type.name().c_str());
return false;
}
return true;
@@ -1435,7 +1532,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, ElemDropImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->elem_segments.size())) {
- errorf(pc, "invalid element segment index: %u", imm.index);
+ DecodeError(pc, "invalid element segment index: %u", imm.index);
return false;
}
return true;
@@ -1447,8 +1544,8 @@ class WasmDecoder : public Decoder {
ValueType src_type = module_->tables[imm.table_src.index].type;
if (!VALIDATE(IsSubtypeOf(
src_type, module_->tables[imm.table_dst.index].type, module_))) {
- errorf(pc, "table %u is not a super-type of %s", imm.table_dst.index,
- src_type.name().c_str());
+ DecodeError(pc, "table %u is not a super-type of %s", imm.table_dst.index,
+ src_type.name().c_str());
return false;
}
return true;
@@ -1456,12 +1553,12 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, HeapTypeImmediate<validate>& imm) {
if (!VALIDATE(!imm.type.is_bottom())) {
- error(pc, "invalid heap type");
+ DecodeError(pc, "invalid heap type");
return false;
}
if (!VALIDATE(imm.type.is_generic() ||
module_->has_type(imm.type.ref_index()))) {
- errorf(pc, "Type index %u is out of bounds", imm.type.ref_index());
+ DecodeError(pc, "Type index %u is out of bounds", imm.type.ref_index());
return false;
}
return true;
@@ -1581,10 +1678,8 @@ class WasmDecoder : public Decoder {
case kExprF64Const:
return 9;
case kNumericPrefix: {
- byte numeric_index =
- decoder->read_u8<validate>(pc + 1, "numeric_index");
- WasmOpcode opcode =
- static_cast<WasmOpcode>(kNumericPrefix << 8 | numeric_index);
+ uint32_t length = 0;
+ opcode = decoder->read_prefixed_opcode<validate>(pc, &length);
switch (opcode) {
case kExprI32SConvertSatF32:
case kExprI32UConvertSatF32:
@@ -1594,44 +1689,44 @@ class WasmDecoder : public Decoder {
case kExprI64UConvertSatF32:
case kExprI64SConvertSatF64:
case kExprI64UConvertSatF64:
- return 2;
+ return length;
case kExprMemoryInit: {
- MemoryInitImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ MemoryInitImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprDataDrop: {
- DataDropImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ DataDropImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprMemoryCopy: {
- MemoryCopyImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ MemoryCopyImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprMemoryFill: {
- MemoryIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ MemoryIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprTableInit: {
- TableInitImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ TableInitImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprElemDrop: {
- ElemDropImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ ElemDropImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprTableCopy: {
- TableCopyImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ TableCopyImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprTableGrow:
case kExprTableSize:
case kExprTableFill: {
- TableIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ TableIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
default:
- decoder->error(pc, "invalid numeric opcode");
- return 2;
+ decoder->DecodeError(pc, "invalid numeric opcode");
+ return length;
}
}
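The hunk above replaces hard-coded "2 + imm.length" sizes with "length + imm.length", where length is whatever read_prefixed_opcode consumed: the prefix byte plus an LEB128-encoded sub-opcode, which can be more than one byte for large indices. A standalone sketch of that length bookkeeping, with a toy opcode packing rather than V8's:

    #include <cstdint>

    struct PrefixedOpcode {
      uint32_t opcode;  // toy packing: (prefix << 16) | sub-opcode index
      uint32_t length;  // bytes consumed: 1 prefix byte + LEB128 index bytes
    };

    // Assumes pc points at the prefix byte and pc < end.
    PrefixedOpcode ReadPrefixedOpcode(const uint8_t* pc, const uint8_t* end) {
      PrefixedOpcode result{0, 1};  // the prefix byte itself
      uint32_t index = 0;
      int shift = 0;
      const uint8_t* p = pc + 1;
      while (p < end && shift < 35) {
        uint8_t b = *p++;
        index |= static_cast<uint32_t>(b & 0x7F) << shift;
        result.length++;
        if ((b & 0x80) == 0) break;
        shift += 7;
      }
      result.opcode = (static_cast<uint32_t>(*pc) << 16) | index;
      return result;
    }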
case kSimdPrefix: {
@@ -1641,67 +1736,81 @@ class WasmDecoder : public Decoder {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_SIMD_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
- return 1 + length;
+ return length;
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_SIMD_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
- return 2 + length;
+ return length + 1;
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_SIMD_MEM_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_SIMD_POST_MVP_MEM_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
{
- MemoryAccessImmediate<validate> imm(decoder, pc + length + 1,
+ MemoryAccessImmediate<validate> imm(decoder, pc + length,
+ UINT32_MAX);
+ return length + imm.length;
+ }
+ case kExprS128Load8Lane:
+ case kExprS128Load16Lane:
+ case kExprS128Load32Lane:
+ case kExprS128Load64Lane:
+ case kExprS128Store8Lane:
+ case kExprS128Store16Lane:
+ case kExprS128Store32Lane:
+ case kExprS128Store64Lane: {
+ MemoryAccessImmediate<validate> imm(decoder, pc + length,
UINT32_MAX);
- return 1 + length + imm.length;
+ // 1 more byte for lane index immediate.
+ return length + imm.length + 1;
}
// Shuffles require a byte per lane, or 16 immediate bytes.
case kExprS128Const:
case kExprI8x16Shuffle:
- return 1 + length + kSimd128Size;
+ return length + kSimd128Size;
default:
- decoder->error(pc, "invalid SIMD opcode");
- return 1 + length;
+ decoder->DecodeError(pc, "invalid SIMD opcode");
+ return length;
}
}
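As the comment in this case notes, i8x16.shuffle and v128.const carry 16 immediate bytes, hence the "length + kSimd128Size" size. For the shuffle, each immediate byte picks one of the 32 lanes of the two input vectors, matching the max_lane < 2 * kSimd128Size check earlier in this file; a toy version of that validation:

    #include <cstdint>

    // Each of the 16 shuffle immediates selects a lane from the concatenation
    // of two 16-byte vectors, so every index must be in [0, 31].
    bool ValidShuffleMask(const uint8_t mask[16]) {
      uint8_t max_lane = 0;
      for (int i = 0; i < 16; ++i) {
        if (mask[i] > max_lane) max_lane = mask[i];
      }
      return max_lane < 32;
    }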
case kAtomicPrefix: {
- byte atomic_index = decoder->read_u8<validate>(pc + 1, "atomic_index");
- WasmOpcode opcode =
- static_cast<WasmOpcode>(kAtomicPrefix << 8 | atomic_index);
+ uint32_t length = 0;
+ opcode = decoder->read_prefixed_opcode<validate>(pc, &length,
+ "atomic_index");
switch (opcode) {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_ATOMIC_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
{
- MemoryAccessImmediate<validate> imm(decoder, pc + 2, UINT32_MAX);
- return 2 + imm.length;
+ MemoryAccessImmediate<validate> imm(decoder, pc + length,
+ UINT32_MAX);
+ return length + imm.length;
}
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_ATOMIC_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
{
- return 2 + 1;
+ return length + 1;
}
default:
- decoder->error(pc, "invalid Atomics opcode");
- return 2;
+ decoder->DecodeError(pc, "invalid Atomics opcode");
+ return length;
}
}
case kGCPrefix: {
- byte gc_index = decoder->read_u8<validate>(pc + 1, "gc_index");
- WasmOpcode opcode = static_cast<WasmOpcode>(kGCPrefix << 8 | gc_index);
+ uint32_t length = 0;
+ opcode =
+ decoder->read_prefixed_opcode<validate>(pc, &length, "gc_index");
switch (opcode) {
case kExprStructNewWithRtt:
case kExprStructNewDefault: {
- StructIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ StructIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprStructGet:
case kExprStructGetS:
case kExprStructGetU:
case kExprStructSet: {
- FieldIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ FieldIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprArrayNewWithRtt:
case kExprArrayNewDefault:
@@ -1710,39 +1819,39 @@ class WasmDecoder : public Decoder {
case kExprArrayGetU:
case kExprArraySet:
case kExprArrayLen: {
- ArrayIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ ArrayIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprBrOnCast: {
- BranchDepthImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ BranchDepthImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprRttCanon:
case kExprRttSub: {
// TODO(7748): Account for rtt.sub's additional immediates if
// they stick.
HeapTypeImmediate<validate> imm(WasmFeatures::All(), decoder,
- pc + 2);
- return 2 + imm.length;
+ pc + length);
+ return length + imm.length;
}
case kExprI31New:
case kExprI31GetS:
case kExprI31GetU:
- return 2;
+ return length;
case kExprRefTest:
case kExprRefCast: {
HeapTypeImmediate<validate> ht1(WasmFeatures::All(), decoder,
- pc + 2);
+ pc + length);
HeapTypeImmediate<validate> ht2(WasmFeatures::All(), decoder,
- pc + 2 + ht1.length);
- return 2 + ht1.length + ht2.length;
+ pc + length + ht1.length);
+ return length + ht1.length + ht2.length;
}
default:
// This is unreachable except for malformed modules.
- decoder->error(pc, "invalid gc opcode");
- return 2;
+ decoder->DecodeError(pc, "invalid gc opcode");
+ return length;
}
}
default:
@@ -1966,8 +2075,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->consume_bytes(locals_length);
for (uint32_t index = params_count; index < this->num_locals(); index++) {
if (!VALIDATE(this->local_type(index).is_defaultable())) {
- this->errorf(
- this->pc(),
+ this->DecodeError(
"Cannot define function-level local of non-defaultable type %s",
this->local_type(index).name().c_str());
return this->TraceFailed();
@@ -1980,9 +2088,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!VALIDATE(control_.empty())) {
if (control_.size() > 1) {
- this->error(control_.back().pc, "unterminated control structure");
+ this->DecodeError(control_.back().pc(),
+ "unterminated control structure");
} else {
- this->error("function body must end with \"end\" opcode");
+ this->DecodeError("function body must end with \"end\" opcode");
}
return TraceFailed();
}
@@ -1994,19 +2103,24 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
bool TraceFailed() {
- TRACE("wasm-error module+%-6d func+%d: %s\n\n", this->error_.offset(),
- this->GetBufferRelativeOffset(this->error_.offset()),
- this->error_.message().c_str());
+ if (this->error_.offset()) {
+ TRACE("wasm-error module+%-6d func+%d: %s\n\n", this->error_.offset(),
+ this->GetBufferRelativeOffset(this->error_.offset()),
+ this->error_.message().c_str());
+ } else {
+ TRACE("wasm-error: %s\n\n", this->error_.message().c_str());
+ }
return false;
}
const char* SafeOpcodeNameAt(const byte* pc) {
+ if (!pc) return "<null>";
if (pc >= this->end_) return "<end>";
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(opcode));
}
- opcode = this->template read_prefixed_opcode<Decoder::kValidate>(pc);
+ opcode = this->template read_prefixed_opcode<Decoder::kFullValidation>(pc);
return WasmOpcodes::OpcodeName(opcode);
}
@@ -2067,16 +2181,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool CheckHasMemory() {
if (!VALIDATE(this->module_->has_memory)) {
- this->error(this->pc_ - 1, "memory instruction with no memory");
- return false;
- }
- return true;
- }
-
- bool CheckHasMemoryForAtomics() {
- if (FLAG_wasm_atomics_on_non_shared_memory && CheckHasMemory()) return true;
- if (!VALIDATE(this->module_->has_shared_memory)) {
- this->error(this->pc_ - 1, "Atomic opcodes used without shared memory");
+ this->DecodeError(this->pc_ - 1, "memory instruction with no memory");
return false;
}
return true;
@@ -2084,7 +2189,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool CheckSimdPostMvp(WasmOpcode opcode) {
if (!FLAG_wasm_simd_post_mvp && WasmOpcodes::IsSimdPostMvpOpcode(opcode)) {
- this->error(
+ this->DecodeError(
"simd opcode not available, enable with --wasm-simd-post-mvp");
return false;
}
@@ -2154,41 +2259,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Append(" | ");
for (size_t i = 0; i < decoder_->stack_size(); ++i) {
Value& val = decoder_->stack_[i];
- WasmOpcode val_opcode = static_cast<WasmOpcode>(*val.pc);
- if (WasmOpcodes::IsPrefixOpcode(val_opcode)) {
- val_opcode =
- decoder_->template read_prefixed_opcode<Decoder::kNoValidate>(
- val.pc);
- }
- Append(" %c@%d:%s", val.type.short_name(),
- static_cast<int>(val.pc - decoder_->start_),
- WasmOpcodes::OpcodeName(val_opcode));
- // If the decoder failed, don't try to decode the immediates, as this
- // can trigger a DCHECK failure.
- if (decoder_->failed()) continue;
- switch (val_opcode) {
- case kExprI32Const: {
- ImmI32Immediate<Decoder::kNoValidate> imm(decoder_, val.pc + 1);
- Append("[%d]", imm.value);
- break;
- }
- case kExprLocalGet:
- case kExprLocalSet:
- case kExprLocalTee: {
- LocalIndexImmediate<Decoder::kNoValidate> imm(decoder_, val.pc + 1);
- Append("[%u]", imm.index);
- break;
- }
- case kExprGlobalGet:
- case kExprGlobalSet: {
- GlobalIndexImmediate<Decoder::kNoValidate> imm(decoder_,
- val.pc + 1);
- Append("[%u]", imm.index);
- break;
- }
- default:
- break;
- }
+ Append(" %c", val.type.short_name());
}
}
@@ -2268,16 +2339,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(Catch) {
CHECK_PROTOTYPE_OPCODE(eh);
if (!VALIDATE(!control_.empty())) {
- this->error("catch does not match any try");
+ this->DecodeError("catch does not match any try");
return 0;
}
Control* c = &control_.back();
if (!VALIDATE(c->is_try())) {
- this->error("catch does not match any try");
+ this->DecodeError("catch does not match any try");
return 0;
}
if (!VALIDATE(c->is_incomplete_try())) {
- this->error("catch already present for try");
+ this->DecodeError("catch already present for try");
return 0;
}
c->kind = kControlTryCatch;
@@ -2298,12 +2369,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Control* c = control_at(imm.depth.depth);
Value exception = Pop(0, kWasmExnRef);
const WasmExceptionSig* sig = imm.index.exception->sig;
- size_t value_count = sig->parameter_count();
+ int value_count = static_cast<int>(sig->parameter_count());
// TODO(wasm): This operand stack mutation is an ugly hack to make
// both type checking here as well as environment merging in the
// graph builder interface work out of the box. We should introduce
// special handling for both and do minimal/no stack mutation here.
- for (size_t i = 0; i < value_count; ++i) Push(sig->GetParam(i));
+ EnsureStackSpace(value_count);
+ for (int i = 0; i < value_count; ++i) Push(sig->GetParam(i));
Vector<Value> values(stack_ + c->stack_depth, value_count);
TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
if (this->failed()) return 0;
@@ -2314,7 +2386,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
} else if (check_result == kInvalidStack) {
return 0;
}
- for (int i = static_cast<int>(value_count) - 1; i >= 0; i--) Pop(i);
+ for (int i = value_count - 1; i >= 0; i--) Pop(i);
Value* pexception = Push(kWasmExnRef);
*pexception = exception;
return 1 + imm.length;
@@ -2330,6 +2402,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
if (V8_LIKELY(check_result == kReachableBranch)) {
switch (ref_object.type.kind()) {
+ case ValueType::kBottom:
+ // We are in unreachable code, just forward the bottom value.
case ValueType::kRef: {
Value* result = Push(ref_object.type);
CALL_INTERFACE(PassThrough, ref_object, result);
@@ -2347,7 +2421,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
default:
- this->error(this->pc_, "invalid argument type to br_on_null");
+ this->DecodeError("invalid argument type to br_on_null");
return 0;
}
} else if (check_result == kInvalidStack) {
@@ -2361,8 +2435,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
uint32_t old_local_count = this->num_locals();
- // Temporarily add the let-defined values
- // to the beginning of the function locals.
+ // Temporarily add the let-defined values to the beginning of the function
+ // locals.
uint32_t locals_length;
if (!this->DecodeLocals(this->pc() + 1 + imm.length, &locals_length, 0)) {
return 0;
@@ -2406,16 +2480,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(Else) {
if (!VALIDATE(!control_.empty())) {
- this->error("else does not match any if");
+ this->DecodeError("else does not match any if");
return 0;
}
Control* c = &control_.back();
if (!VALIDATE(c->is_if())) {
- this->error(this->pc_, "else does not match an if");
+ this->DecodeError("else does not match an if");
return 0;
}
if (!VALIDATE(c->is_onearmed_if())) {
- this->error(this->pc_, "else already present for if");
+ this->DecodeError("else already present for if");
return 0;
}
if (!TypeCheckFallThru()) return 0;
@@ -2430,18 +2504,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(End) {
if (!VALIDATE(!control_.empty())) {
- this->error("end does not match any if, try, or block");
+ this->DecodeError("end does not match any if, try, or block");
return 0;
}
Control* c = &control_.back();
if (!VALIDATE(!c->is_incomplete_try())) {
- this->error(this->pc_, "missing catch or catch-all in try");
+ this->DecodeError("missing catch or catch-all in try");
return 0;
}
if (c->is_onearmed_if()) {
if (!VALIDATE(c->end_merge.arity == c->start_merge.arity)) {
- this->error(c->pc,
- "start-arity and end-arity of one-armed if must match");
+ this->DecodeError(
+ c->pc(), "start-arity and end-arity of one-armed if must match");
return 0;
}
if (!TypeCheckOneArmedIf(c)) return 0;
@@ -2457,7 +2531,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (control_.size() == 1) {
// If at the last (implicit) control, check we are at end.
if (!VALIDATE(this->pc_ + 1 == this->end_)) {
- this->error(this->pc_ + 1, "trailing code after function end");
+ this->DecodeError(this->pc_ + 1, "trailing code after function end");
return 0;
}
// The result of the block is the return value.
@@ -2477,7 +2551,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value tval = Pop(0, fval.type);
ValueType type = tval.type == kWasmBottom ? fval.type : tval.type;
if (!VALIDATE(!type.is_reference_type())) {
- this->error("select without type is only valid for value type inputs");
+ this->DecodeError(
+ "select without type is only valid for value type inputs");
return 0;
}
Value* result = Push(type);
@@ -2654,16 +2729,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case ValueType::kOptRef:
CALL_INTERFACE_IF_REACHABLE(UnOp, kExprRefIsNull, value, result);
return 1;
+ case ValueType::kBottom:
+ // We are in unreachable code, the return value does not matter.
case ValueType::kRef:
// For non-nullable references, the result is always false.
CALL_INTERFACE_IF_REACHABLE(I32Const, result, 0);
return 1;
default:
if (validate) {
- this->errorf(this->pc_,
- "invalid argument type to ref.is_null. Expected "
- "reference type, got %s",
- value.type.name().c_str());
+ this->DecodeError(
+ "invalid argument type to ref.is_null. Expected reference type, "
+ "got %s",
+ value.type.name().c_str());
return 0;
}
UNREACHABLE();
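The new ValueType::kBottom cases in this and the neighbouring hunks rely on the decoder's polymorphic-stack convention: once code is unreachable, popped values may carry the bottom type, which is treated as a subtype of every value type, so type checks still pass without the dead code producing a real value. A toy version of that rule, assuming a flat lattice apart from bottom:

    enum class ToyType { kBottom, kI32, kI64, kF32, kF64, kS128, kRef };

    // Bottom is a subtype of everything; otherwise require an exact match.
    bool IsSubtype(ToyType sub, ToyType super) {
      return sub == ToyType::kBottom || sub == super;
    }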
@@ -2686,6 +2763,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
Value value = Pop(0);
switch (value.type.kind()) {
+ case ValueType::kBottom:
+ // We are in unreachable code. Forward the bottom value.
case ValueType::kRef: {
Value* result = Push(value.type);
CALL_INTERFACE_IF_REACHABLE(PassThrough, value, result);
@@ -2699,10 +2778,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
default:
if (validate) {
- this->errorf(this->pc_,
- "invalid agrument type to ref.as_non_null: Expected "
- "reference type, got %s",
- value.type.name().c_str());
+ this->DecodeError(
+              "invalid argument type to ref.as_non_null: Expected reference "
+ "type, got %s",
+ value.type.name().c_str());
}
return 0;
}
@@ -2751,8 +2830,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
GlobalIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(imm.global->mutability)) {
- this->errorf(this->pc_, "immutable global #%u cannot be assigned",
- imm.index);
+ this->DecodeError("immutable global #%u cannot be assigned", imm.index);
return 0;
}
Value value = Pop(0, imm.type);
@@ -2818,7 +2896,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!CheckHasMemory()) return 0;
MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!VALIDATE(this->module_->origin == kWasmOrigin)) {
- this->error("grow_memory is not supported for asmjs modules");
+ this->DecodeError("grow_memory is not supported for asmjs modules");
return 0;
}
Value value = Pop(0, kWasmI32);
@@ -2860,9 +2938,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CallFunctionImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(this->CanReturnCall(imm.sig))) {
- this->errorf(this->pc_, "%s: %s",
- WasmOpcodes::OpcodeName(kExprReturnCall),
- "tail call return types mismatch");
+ this->DecodeError("%s: %s", WasmOpcodes::OpcodeName(kExprReturnCall),
+ "tail call return types mismatch");
return 0;
}
ArgVector args = PopArgs(imm.sig);
@@ -2876,9 +2953,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(this->CanReturnCall(imm.sig))) {
- this->errorf(this->pc_, "%s: %s",
- WasmOpcodes::OpcodeName(kExprReturnCallIndirect),
- "tail call return types mismatch");
+ this->DecodeError("%s: %s",
+ WasmOpcodes::OpcodeName(kExprReturnCallIndirect),
+ "tail call return types mismatch");
return 0;
}
Value index = Pop(0, kWasmI32);
@@ -2892,12 +2969,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
Value func_ref = Pop(0);
ValueType func_type = func_ref.type;
- if (!func_type.is_object_reference_type() || !func_type.has_index() ||
- !this->module_->has_signature(func_type.ref_index())) {
- this->errorf(this->pc_,
- "call_ref: Expected function reference on top of stack, "
- "found %s of type %s instead",
- SafeOpcodeNameAt(func_ref.pc), func_type.name().c_str());
+ if (func_type == kWasmBottom) {
+ // We are in unreachable code, maintain the polymorphic stack.
+ return 1;
+ }
+ if (!VALIDATE(func_type.is_object_reference_type() &&
+ func_type.has_index() &&
+ this->module_->has_signature(func_type.ref_index()))) {
+ this->DecodeError(
+ "call_ref: Expected function reference on top of stack, found %s of "
+ "type %s instead",
+ SafeOpcodeNameAt(func_ref.pc()), func_type.name().c_str());
return 0;
}
const FunctionSig* sig = this->module_->signature(func_type.ref_index());
@@ -2913,12 +2995,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(return_call);
Value func_ref = Pop(0);
ValueType func_type = func_ref.type;
- if (!func_type.is_object_reference_type() || !func_type.has_index() ||
- !this->module_->has_signature(func_type.ref_index())) {
- this->errorf(this->pc_,
- "return_call_ref: Expected function reference on top of "
- "found %s of type %s instead",
- SafeOpcodeNameAt(func_ref.pc), func_type.name().c_str());
+ if (func_type == kWasmBottom) {
+ // We are in unreachable code, maintain the polymorphic stack.
+ return 1;
+ }
+ if (!VALIDATE(func_type.is_object_reference_type() &&
+ func_type.has_index() &&
+ this->module_->has_signature(func_type.ref_index()))) {
+ this->DecodeError(
+ "return_call_ref: Expected function reference on top of stack, found "
+ "%s of type %s instead",
+ SafeOpcodeNameAt(func_ref.pc()), func_type.name().c_str());
return 0;
}
const FunctionSig* sig = this->module_->signature(func_type.ref_index());
@@ -2930,10 +3017,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DECODE(Numeric) {
- byte numeric_index =
- this->template read_u8<validate>(this->pc_ + 1, "numeric index");
- WasmOpcode full_opcode =
- static_cast<WasmOpcode>(kNumericPrefix << 8 | numeric_index);
+ uint32_t opcode_length = 0;
+ WasmOpcode full_opcode = this->template read_prefixed_opcode<validate>(
+ this->pc_, &opcode_length, "numeric index");
if (full_opcode == kExprTableGrow || full_opcode == kExprTableSize ||
full_opcode == kExprTableFill) {
CHECK_PROTOTYPE_OPCODE(reftypes);
@@ -2941,7 +3027,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(bulk_memory);
}
trace_msg->AppendOpcode(full_opcode);
- return DecodeNumericOpcode(full_opcode);
+ return DecodeNumericOpcode(full_opcode, opcode_length);
}
DECODE(Simd) {
@@ -2951,25 +3037,25 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->pc_, &opcode_length);
if (!VALIDATE(this->ok())) return 0;
trace_msg->AppendOpcode(full_opcode);
- return DecodeSimdOpcode(full_opcode, 1 + opcode_length);
+ return DecodeSimdOpcode(full_opcode, opcode_length);
}
DECODE(Atomic) {
CHECK_PROTOTYPE_OPCODE(threads);
- byte atomic_index =
- this->template read_u8<validate>(this->pc_ + 1, "atomic index");
- WasmOpcode full_opcode =
- static_cast<WasmOpcode>(kAtomicPrefix << 8 | atomic_index);
+ uint32_t opcode_length = 0;
+ WasmOpcode full_opcode = this->template read_prefixed_opcode<validate>(
+ this->pc_, &opcode_length, "atomic index");
trace_msg->AppendOpcode(full_opcode);
- return DecodeAtomicOpcode(full_opcode);
+ return DecodeAtomicOpcode(full_opcode, opcode_length);
}
DECODE(GC) {
CHECK_PROTOTYPE_OPCODE(gc);
- byte gc_index = this->template read_u8<validate>(this->pc_ + 1, "gc index");
- WasmOpcode full_opcode = static_cast<WasmOpcode>(kGCPrefix << 8 | gc_index);
+ uint32_t opcode_length = 0;
+ WasmOpcode full_opcode = this->template read_prefixed_opcode<validate>(
+ this->pc_, &opcode_length, "gc index");
trace_msg->AppendOpcode(full_opcode);
- return DecodeGCOpcode(full_opcode);
+ return DecodeGCOpcode(full_opcode, opcode_length);
}
#define SIMPLE_PROTOTYPE_CASE(name, opc, sig) \
@@ -2980,7 +3066,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(UnknownOrAsmJs) {
// Deal with special asmjs opcodes.
if (!VALIDATE(is_asmjs_module(this->module_))) {
- this->errorf(this->pc(), "Invalid opcode 0x%x", opcode);
+ this->DecodeError("Invalid opcode 0x%x", opcode);
return 0;
}
const FunctionSig* sig = WasmOpcodes::AsmjsSignature(opcode);
@@ -3108,7 +3194,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
if (!VALIDATE(this->pc_ == this->end_)) {
- this->error("Beyond end of code");
+ this->DecodeError("Beyond end of code");
}
}
@@ -3207,7 +3293,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(this, this->pc_ + prefix_len,
type.size_log_2());
- Value index = Pop(0, kWasmI32);
+ ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value index = Pop(0, index_type);
Value* result = Push(type.value_type());
CALL_INTERFACE_IF_REACHABLE(LoadMem, type, imm, index, result);
return prefix_len + imm.length;
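The change above makes the address operand's type module-dependent: with the memory64 proposal the index is i64, otherwise i32. The decoder only picks the popped type; the sketch below is an assumed runtime-side illustration (not from this file) of why the wider index wants an overflow-safe bounds check:

    #include <cstdint>

    // Effective address = index + offset; access_size bytes must fit inside
    // the memory. All arithmetic stays in 64 bits and avoids wrap-around.
    bool AccessInBounds(uint64_t index, uint64_t offset, uint64_t access_size,
                        uint64_t memory_size_bytes) {
      if (offset > memory_size_bytes) return false;
      if (access_size > memory_size_bytes - offset) return false;
      return index <= memory_size_bytes - offset - access_size;
    }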
@@ -3221,27 +3308,58 @@ class WasmFullDecoder : public WasmDecoder<validate> {
transform == LoadTransformationKind::kExtend ? 3 : type.size_log_2();
MemoryAccessImmediate<validate> imm(this, this->pc_ + opcode_length,
max_alignment);
- Value index = Pop(0, kWasmI32);
+ ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value index = Pop(0, index_type);
Value* result = Push(kWasmS128);
CALL_INTERFACE_IF_REACHABLE(LoadTransform, type, transform, imm, index,
result);
return opcode_length + imm.length;
}
+ int DecodeLoadLane(LoadType type, uint32_t opcode_length) {
+ if (!CheckHasMemory()) return 0;
+ MemoryAccessImmediate<validate> mem_imm(this, this->pc_ + opcode_length,
+ type.size_log_2());
+ SimdLaneImmediate<validate> lane_imm(
+ this, this->pc_ + opcode_length + mem_imm.length);
+ Value v128 = Pop(1, kWasmS128);
+ Value index = Pop(0, kWasmI32);
+
+ Value* result = Push(kWasmS128);
+ CALL_INTERFACE_IF_REACHABLE(LoadLane, type, v128, index, mem_imm,
+ lane_imm.lane, result);
+ return opcode_length + mem_imm.length + lane_imm.length;
+ }
+
+ int DecodeStoreLane(StoreType type, uint32_t opcode_length) {
+ if (!CheckHasMemory()) return 0;
+ MemoryAccessImmediate<validate> mem_imm(this, this->pc_ + opcode_length,
+ type.size_log_2());
+ SimdLaneImmediate<validate> lane_imm(
+ this, this->pc_ + opcode_length + mem_imm.length);
+ Value v128 = Pop(1, kWasmS128);
+ Value index = Pop(0, kWasmI32);
+
+ CALL_INTERFACE_IF_REACHABLE(StoreLane, type, mem_imm, index, v128,
+ lane_imm.lane);
+ return opcode_length + mem_imm.length + lane_imm.length;
+ }
+
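The new DecodeLoadLane/DecodeStoreLane helpers above read a memarg followed by a single lane-index byte, which is why they return opcode_length + mem_imm.length + lane_imm.length. A toy encoder for that byte layout; the 0xFD SIMD prefix is taken from the SIMD proposal and the sub-opcode index is left as a parameter rather than assumed:

    #include <cstdint>
    #include <vector>

    void EmitU32LEB(std::vector<uint8_t>* out, uint32_t value) {
      do {
        uint8_t b = value & 0x7F;
        value >>= 7;
        if (value != 0) b |= 0x80;
        out->push_back(b);
      } while (value != 0);
    }

    // v128.loadN_lane / v128.storeN_lane style instruction:
    // prefix, sub-opcode, memarg (alignment, offset), lane byte.
    std::vector<uint8_t> EmitLaneMemOp(uint32_t sub_opcode, uint32_t align_log2,
                                       uint32_t offset, uint8_t lane) {
      std::vector<uint8_t> bytes;
      bytes.push_back(0xFD);           // SIMD prefix
      EmitU32LEB(&bytes, sub_opcode);  // e.g. the load8_lane index
      EmitU32LEB(&bytes, align_log2);  // memarg: alignment (log2)
      EmitU32LEB(&bytes, offset);      // memarg: offset
      bytes.push_back(lane);           // single lane-index immediate
      return bytes;
    }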
int DecodeStoreMem(StoreType store, int prefix_len = 1) {
if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(this, this->pc_ + prefix_len,
store.size_log_2());
Value value = Pop(1, store.value_type());
- Value index = Pop(0, kWasmI32);
+ ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value index = Pop(0, index_type);
CALL_INTERFACE_IF_REACHABLE(StoreMem, store, imm, index, value);
return prefix_len + imm.length;
}
bool ValidateBrTableTarget(uint32_t target, const byte* pos, int index) {
if (!VALIDATE(target < this->control_.size())) {
- this->errorf(pos, "improper branch in br_table target %u (depth %u)",
- index, target);
+ this->DecodeError(pos, "improper branch in br_table target %u (depth %u)",
+ index, target);
return false;
}
return true;
@@ -3263,10 +3381,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int br_arity = merge->arity;
// First we check if the arities match.
if (!VALIDATE(br_arity == static_cast<int>(result_types->size()))) {
- this->errorf(pos,
- "inconsistent arity in br_table target %u (previous was "
- "%zu, this one is %u)",
- index, result_types->size(), br_arity);
+ this->DecodeError(pos,
+ "inconsistent arity in br_table target %u (previous "
+ "was %zu, this one is %u)",
+ index, result_types->size(), br_arity);
return false;
}
@@ -3277,21 +3395,21 @@ class WasmFullDecoder : public WasmDecoder<validate> {
(*result_types)[i] =
CommonSubtype((*result_types)[i], (*merge)[i].type, this->module_);
if (!VALIDATE((*result_types)[i] != kWasmBottom)) {
- this->errorf(pos,
- "inconsistent type in br_table target %u (previous "
- "was %s, this one is %s)",
- index, type.name().c_str(),
- (*merge)[i].type.name().c_str());
+ this->DecodeError(pos,
+ "inconsistent type in br_table target %u (previous "
+ "was %s, this one is %s)",
+ index, type.name().c_str(),
+ (*merge)[i].type.name().c_str());
return false;
}
} else {
// All target must have the same signature.
if (!VALIDATE((*result_types)[i] == (*merge)[i].type)) {
- this->errorf(pos,
- "inconsistent type in br_table target %u (previous "
- "was %s, this one is %s)",
- index, (*result_types)[i].name().c_str(),
- (*merge)[i].type.name().c_str());
+ this->DecodeError(pos,
+ "inconsistent type in br_table target %u (previous "
+ "was %s, this one is %s)",
+ index, (*result_types)[i].name().c_str(),
+ (*merge)[i].type.name().c_str());
return false;
}
}
@@ -3306,10 +3424,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
static_cast<int>(stack_size()) - control_.back().stack_depth;
// There have to be enough values on the stack.
if (!VALIDATE(available >= br_arity)) {
- this->errorf(this->pc_,
- "expected %u elements on the stack for branch to "
- "@%d, found %u",
- br_arity, startrel(control_.back().pc), available);
+ this->DecodeError(
+ "expected %u elements on the stack for branch to @%d, found %u",
+ br_arity, startrel(control_.back().pc()), available);
return false;
}
Value* stack_values = stack_end_ - br_arity;
@@ -3317,9 +3434,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (int i = 0; i < br_arity; ++i) {
Value& val = stack_values[i];
if (!VALIDATE(IsSubtypeOf(val.type, result_types[i], this->module_))) {
- this->errorf(this->pc_,
- "type error in merge[%u] (expected %s, got %s)", i,
- result_types[i].name().c_str(), val.type.name().c_str());
+ this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
+ result_types[i].name().c_str(),
+ val.type.name().c_str());
return false;
}
}
@@ -3408,17 +3525,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return DecodeLoadMem(LoadType::kS128Load, opcode_length);
case kExprS128StoreMem:
return DecodeStoreMem(StoreType::kS128Store, opcode_length);
- case kExprS128LoadMem32Zero:
- if (!CheckSimdPostMvp(opcode)) {
- return 0;
- }
+ case kExprS128Load32Zero:
return DecodeLoadTransformMem(LoadType::kI32Load,
LoadTransformationKind::kZeroExtend,
opcode_length);
- case kExprS128LoadMem64Zero:
- if (!CheckSimdPostMvp(opcode)) {
- return 0;
- }
+ case kExprS128Load64Zero:
return DecodeLoadTransformMem(LoadType::kI64Load,
LoadTransformationKind::kZeroExtend,
opcode_length);
@@ -3460,6 +3571,30 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return DecodeLoadTransformMem(LoadType::kI64Load32U,
LoadTransformationKind::kExtend,
opcode_length);
+ case kExprS128Load8Lane: {
+ return DecodeLoadLane(LoadType::kI32Load8S, opcode_length);
+ }
+ case kExprS128Load16Lane: {
+ return DecodeLoadLane(LoadType::kI32Load16S, opcode_length);
+ }
+ case kExprS128Load32Lane: {
+ return DecodeLoadLane(LoadType::kI32Load, opcode_length);
+ }
+ case kExprS128Load64Lane: {
+ return DecodeLoadLane(LoadType::kI64Load, opcode_length);
+ }
+ case kExprS128Store8Lane: {
+ return DecodeStoreLane(StoreType::kI32Store8, opcode_length);
+ }
+ case kExprS128Store16Lane: {
+ return DecodeStoreLane(StoreType::kI32Store16, opcode_length);
+ }
+ case kExprS128Store32Lane: {
+ return DecodeStoreLane(StoreType::kI32Store, opcode_length);
+ }
+ case kExprS128Store64Lane: {
+ return DecodeStoreLane(StoreType::kI64Store, opcode_length);
+ }
case kExprS128Const:
return SimdConstOp(opcode_length);
default: {
@@ -3468,7 +3603,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!VALIDATE(sig != nullptr)) {
- this->error("invalid simd opcode");
+ this->DecodeError("invalid simd opcode");
return 0;
}
ArgVector args = PopArgs(sig);
@@ -3480,98 +3615,98 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- int DecodeGCOpcode(WasmOpcode opcode) {
+ int DecodeGCOpcode(WasmOpcode opcode, uint32_t opcode_length) {
switch (opcode) {
case kExprStructNewWithRtt: {
- StructIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value rtt = Pop(imm.struct_type->field_count());
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->errorf(this->pc_,
- "struct.new_with_rtt expected rtt, found %s of type %s",
- SafeOpcodeNameAt(rtt.pc), rtt.type.name().c_str());
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError(
+ "struct.new_with_rtt expected rtt, found %s of type %s",
+ SafeOpcodeNameAt(rtt.pc()), rtt.type.name().c_str());
return 0;
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.heap_representation() == imm.index)) {
- this->errorf(this->pc_,
- "struct.new_with_rtt expected rtt for type %d, found "
- "rtt for type %s",
- imm.index, rtt.type.heap_type().name().c_str());
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ rtt.type.heap_representation() == imm.index)) {
+ this->DecodeError(
+ "struct.new_with_rtt expected rtt for type %d, found rtt for "
+ "type %s",
+ imm.index, rtt.type.heap_type().name().c_str());
return 0;
}
ArgVector args = PopArgs(imm.struct_type);
Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_REACHABLE(StructNewWithRtt, imm, rtt, args.begin(),
value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprStructNewDefault: {
- StructIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (validate) {
for (uint32_t i = 0; i < imm.struct_type->field_count(); i++) {
ValueType ftype = imm.struct_type->field(i);
if (!VALIDATE(ftype.is_defaultable())) {
- this->errorf(this->pc_,
- "struct.new_default_with_rtt: struct type %d has "
- "non-defaultable type %s for field %d",
- imm.index, ftype.name().c_str(), i);
+ this->DecodeError(
+ "struct.new_default_with_rtt: struct type %d has "
+ "non-defaultable type %s for field %d",
+ imm.index, ftype.name().c_str(), i);
return 0;
}
}
}
Value rtt = Pop(0);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->errorf(
- this->pc_,
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError(
"struct.new_default_with_rtt expected rtt, found %s of type %s",
- SafeOpcodeNameAt(rtt.pc), rtt.type.name().c_str());
+ SafeOpcodeNameAt(rtt.pc()), rtt.type.name().c_str());
return 0;
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.heap_representation() == imm.index)) {
- this->errorf(
- this->pc_,
- "struct.new_default_with_rtt expected rtt for type %d, found "
- "rtt for type %s",
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ rtt.type.heap_representation() == imm.index)) {
+ this->DecodeError(
+ "struct.new_default_with_rtt expected rtt for type %d, found rtt "
+ "for type %s",
imm.index, rtt.type.heap_type().name().c_str());
return 0;
}
Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_REACHABLE(StructNewDefault, imm, rtt, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprStructGet: {
- FieldIndexImmediate<validate> field(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, field)) return 0;
+ FieldIndexImmediate<validate> field(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
ValueType field_type =
field.struct_index.struct_type->field(field.index);
if (!VALIDATE(!field_type.is_packed())) {
- this->error(this->pc_,
- "struct.get used with a field of packed type. "
- "Use struct.get_s or struct.get_u instead.");
+ this->DecodeError(
+ "struct.get used with a field of packed type. Use struct.get_s "
+ "or struct.get_u instead.");
return 0;
}
Value struct_obj =
Pop(0, ValueType::Ref(field.struct_index.index, kNullable));
Value* value = Push(field_type);
CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field, true, value);
- return 2 + field.length;
+ return opcode_length + field.length;
}
case kExprStructGetU:
case kExprStructGetS: {
- FieldIndexImmediate<validate> field(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, field)) return 0;
+ FieldIndexImmediate<validate> field(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
ValueType field_type =
field.struct_index.struct_type->field(field.index);
if (!VALIDATE(field_type.is_packed())) {
- this->errorf(this->pc_,
- "%s is only valid for packed struct fields. "
- "Use struct.get instead.",
- WasmOpcodes::OpcodeName(opcode));
+ this->DecodeError(
+ "%s is only valid for packed struct fields. Use struct.get "
+ "instead.",
+ WasmOpcodes::OpcodeName(opcode));
return 0;
}
Value struct_obj =
@@ -3579,39 +3714,42 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value* value = Push(field_type.Unpacked());
CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field,
opcode == kExprStructGetS, value);
- return 2 + field.length;
+ return opcode_length + field.length;
}
case kExprStructSet: {
- FieldIndexImmediate<validate> field(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, field)) return 0;
+ FieldIndexImmediate<validate> field(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
const StructType* struct_type = field.struct_index.struct_type;
if (!VALIDATE(struct_type->mutability(field.index))) {
- this->error(this->pc_, "setting immutable struct field");
+ this->DecodeError("setting immutable struct field");
return 0;
}
Value field_value = Pop(1, struct_type->field(field.index).Unpacked());
Value struct_obj =
Pop(0, ValueType::Ref(field.struct_index.index, kNullable));
CALL_INTERFACE_IF_REACHABLE(StructSet, struct_obj, field, field_value);
- return 2 + field.length;
+ return opcode_length + field.length;
}
case kExprArrayNewWithRtt: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value rtt = Pop(2);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->errorf(this->pc_ + 2,
- "array.new_with_rtt expected rtt, found %s of type %s",
- SafeOpcodeNameAt(rtt.pc), rtt.type.name().c_str());
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError(
+ this->pc_ + opcode_length,
+ "array.new_with_rtt expected rtt, found %s of type %s",
+ SafeOpcodeNameAt(rtt.pc()), rtt.type.name().c_str());
return 0;
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.heap_representation() == imm.index)) {
- this->errorf(this->pc_ + 2,
- "array.new_with_rtt expected rtt for type %d, found "
- "rtt for type %s",
- imm.index, rtt.type.heap_type().name().c_str());
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ rtt.type.heap_representation() == imm.index)) {
+ this->DecodeError(
+ this->pc_ + opcode_length,
+ "array.new_with_rtt expected rtt for type %d, found "
+ "rtt for type %s",
+ imm.index, rtt.type.heap_type().name().c_str());
return 0;
}
Value length = Pop(1, kWasmI32);
@@ -3619,48 +3757,47 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_REACHABLE(ArrayNewWithRtt, imm, length, initial_value,
rtt, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArrayNewDefault: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->element_type().is_defaultable())) {
- this->errorf(this->pc_,
- "array.new_default_with_rtt: array type %d has "
- "non-defaultable element type %s",
- imm.index,
- imm.array_type->element_type().name().c_str());
+ this->DecodeError(
+ "array.new_default_with_rtt: array type %d has "
+ "non-defaultable element type %s",
+ imm.index, imm.array_type->element_type().name().c_str());
return 0;
}
Value rtt = Pop(1);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->errorf(
- this->pc_ + 2,
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError(
+ this->pc_ + opcode_length,
"array.new_default_with_rtt expected rtt, found %s of type %s",
- SafeOpcodeNameAt(rtt.pc), rtt.type.name().c_str());
+ SafeOpcodeNameAt(rtt.pc()), rtt.type.name().c_str());
return 0;
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.heap_representation() == imm.index)) {
- this->errorf(this->pc_ + 2,
- "array.new_default_with_rtt expected rtt for type %d, "
- "found rtt for type %s",
- imm.index, rtt.type.heap_type().name().c_str());
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ rtt.type.heap_representation() == imm.index)) {
+ this->DecodeError(this->pc_ + opcode_length,
+ "array.new_default_with_rtt expected rtt for type "
+ "%d, found rtt for type %s",
+ imm.index, rtt.type.heap_type().name().c_str());
return 0;
}
Value length = Pop(0, kWasmI32);
Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_REACHABLE(ArrayNewDefault, imm, length, rtt, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArrayGetS:
case kExprArrayGetU: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->element_type().is_packed())) {
- this->errorf(
- this->pc_,
+ this->DecodeError(
"%s is only valid for packed arrays. Use array.get instead.",
WasmOpcodes::OpcodeName(opcode));
return 0;
@@ -3670,15 +3807,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value* value = Push(imm.array_type->element_type().Unpacked());
CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index,
opcode == kExprArrayGetS, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArrayGet: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(!imm.array_type->element_type().is_packed())) {
- this->error(this->pc_,
- "array.get used with a field of packed type. "
- "Use array.get_s or array.get_u instead.");
+ this->DecodeError(
+ "array.get used with a field of packed type. Use array.get_s or "
+ "array.get_u instead.");
return 0;
}
Value index = Pop(1, kWasmI32);
@@ -3686,53 +3823,54 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value* value = Push(imm.array_type->element_type());
CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index, true,
value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArraySet: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->mutability())) {
- this->error(this->pc_, "setting element of immutable array");
+ this->DecodeError("setting element of immutable array");
return 0;
}
Value value = Pop(2, imm.array_type->element_type().Unpacked());
Value index = Pop(1, kWasmI32);
Value array_obj = Pop(0, ValueType::Ref(imm.index, kNullable));
CALL_INTERFACE_IF_REACHABLE(ArraySet, array_obj, imm, index, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArrayLen: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value array_obj = Pop(0, ValueType::Ref(imm.index, kNullable));
Value* value = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(ArrayLen, array_obj, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprI31New: {
Value input = Pop(0, kWasmI32);
Value* value = Push(kWasmI31Ref);
CALL_INTERFACE_IF_REACHABLE(I31New, input, value);
- return 2;
+ return opcode_length;
}
case kExprI31GetS: {
Value i31 = Pop(0, kWasmI31Ref);
Value* value = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(I31GetS, i31, value);
- return 2;
+ return opcode_length;
}
case kExprI31GetU: {
Value i31 = Pop(0, kWasmI31Ref);
Value* value = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(I31GetU, i31, value);
- return 2;
+ return opcode_length;
}
case kExprRttCanon: {
- HeapTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ HeapTypeImmediate<validate> imm(this->enabled_, this,
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value* value = Push(ValueType::Rtt(imm.type, 1));
CALL_INTERFACE_IF_REACHABLE(RttCanon, imm, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprRttSub: {
// TODO(7748): The proposal currently includes additional immediates
@@ -3741,29 +3879,35 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// If these immediates don't get dropped (in the spirit of
// https://github.com/WebAssembly/function-references/pull/31 ),
// implement them here.
- HeapTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ HeapTypeImmediate<validate> imm(this->enabled_, this,
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value parent = Pop(0);
- // TODO(7748): Consider exposing "IsSubtypeOfHeap(HeapType t1, t2)" so
- // we can avoid creating (ref heaptype) wrappers here.
- if (!VALIDATE(parent.type.kind() == ValueType::kRtt &&
- IsSubtypeOf(
- ValueType::Ref(imm.type, kNonNullable),
- ValueType::Ref(parent.type.heap_type(), kNonNullable),
- this->module_))) {
- this->error(this->pc_, "rtt.sub requires a supertype rtt on stack");
- return 0;
+ if (parent.type.is_bottom()) {
+ Push(kWasmBottom);
+ } else {
+ // TODO(7748): Consider exposing "IsSubtypeOfHeap(HeapType t1, t2)" so
+ // we can avoid creating (ref heaptype) wrappers here.
+ if (!VALIDATE(parent.type.is_rtt() &&
+ IsSubtypeOf(ValueType::Ref(imm.type, kNonNullable),
+ ValueType::Ref(parent.type.heap_type(),
+ kNonNullable),
+ this->module_))) {
+ this->DecodeError("rtt.sub requires a supertype rtt on stack");
+ return 0;
+ }
+ Value* value =
+ Push(ValueType::Rtt(imm.type, parent.type.depth() + 1));
+ CALL_INTERFACE_IF_REACHABLE(RttSub, imm, parent, value);
}
- Value* value = Push(ValueType::Rtt(imm.type, parent.type.depth() + 1));
- CALL_INTERFACE_IF_REACHABLE(RttSub, imm, parent, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprRefTest: {
// "Tests whether {obj}'s runtime type is a runtime subtype of {rtt}."
HeapTypeImmediate<validate> obj_type(this->enabled_, this,
- this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, obj_type)) return 0;
- int len = 2 + obj_type.length;
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, obj_type)) return 0;
+ int len = opcode_length + obj_type.length;
HeapTypeImmediate<validate> rtt_type(this->enabled_, this,
this->pc_ + len);
if (!this->Validate(this->pc_ + len, rtt_type)) return 0;
@@ -3772,16 +3916,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!VALIDATE(IsSubtypeOf(ValueType::Ref(rtt_type.type, kNonNullable),
ValueType::Ref(obj_type.type, kNonNullable),
this->module_))) {
- this->errorf(this->pc_,
- "ref.test: rtt type must be subtype of object type");
+ this->DecodeError(
+ "ref.test: rtt type must be subtype of object type");
return 0;
}
Value rtt = Pop(1);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt &&
- rtt.type.heap_type() == rtt_type.type)) {
- this->errorf(this->pc_,
- "ref.test: expected rtt for type %s but got %s",
- rtt_type.type.name().c_str(), rtt.type.name().c_str());
+ if (!VALIDATE(
+ (rtt.type.is_rtt() && rtt.type.heap_type() == rtt_type.type) ||
+ rtt.type == kWasmBottom)) {
+ this->DecodeError("ref.test: expected rtt for type %s but got %s",
+ rtt_type.type.name().c_str(),
+ rtt.type.name().c_str());
return 0;
}
Value obj = Pop(0, ValueType::Ref(obj_type.type, kNullable));
@@ -3791,9 +3936,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprRefCast: {
HeapTypeImmediate<validate> obj_type(this->enabled_, this,
- this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, obj_type)) return 0;
- int len = 2 + obj_type.length;
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, obj_type)) return 0;
+ int len = opcode_length + obj_type.length;
HeapTypeImmediate<validate> rtt_type(this->enabled_, this,
this->pc_ + len);
if (!this->Validate(this->pc_ + len, rtt_type)) return 0;
@@ -3801,16 +3946,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!VALIDATE(IsSubtypeOf(ValueType::Ref(rtt_type.type, kNonNullable),
ValueType::Ref(obj_type.type, kNonNullable),
this->module_))) {
- this->errorf(this->pc_,
- "ref.cast: rtt type must be subtype of object type");
+ this->DecodeError(
+ "ref.cast: rtt type must be subtype of object type");
return 0;
}
Value rtt = Pop(1);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt &&
- rtt.type.heap_type() == rtt_type.type)) {
- this->errorf(this->pc_,
- "ref.cast: expected rtt for type %s but got %s",
- rtt_type.type.name().c_str(), rtt.type.name().c_str());
+ if (!VALIDATE(
+ (rtt.type.is_rtt() && rtt.type.heap_type() == rtt_type.type) ||
+ rtt.type == kWasmBottom)) {
+ this->DecodeError("ref.cast: expected rtt for type %s but got %s",
+ rtt_type.type.name().c_str(),
+ rtt.type.name().c_str());
return 0;
}
Value obj = Pop(0, ValueType::Ref(obj_type.type, kNullable));
@@ -3819,34 +3965,40 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return len;
}
case kExprBrOnCast: {
- BranchDepthImmediate<validate> branch_depth(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, branch_depth, control_.size())) {
+ BranchDepthImmediate<validate> branch_depth(this,
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, branch_depth,
+ control_.size())) {
return 0;
}
// TODO(7748): If the heap type immediates remain in the spec, read
// them here.
Value rtt = Pop(1);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->error(this->pc_, "br_on_cast[1]: expected rtt on stack");
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError("br_on_cast[1]: expected rtt on stack");
return 0;
}
Value obj = Pop(0);
- if (!VALIDATE(obj.type.is_object_reference_type())) {
- this->error(this->pc_, "br_on_cast[0]: expected reference on stack");
+ if (!VALIDATE(obj.type.is_object_reference_type() ||
+ rtt.type.is_bottom())) {
+ this->DecodeError("br_on_cast[0]: expected reference on stack");
return 0;
}
// The static type of {obj} must be a supertype of {rtt}'s type.
if (!VALIDATE(
+ rtt.type.is_bottom() || obj.type.is_bottom() ||
IsSubtypeOf(ValueType::Ref(rtt.type.heap_type(), kNonNullable),
ValueType::Ref(obj.type.heap_type(), kNonNullable),
this->module_))) {
- this->error(this->pc_,
- "br_on_cast: rtt type must be a subtype of object type");
+ this->DecodeError(
+ "br_on_cast: rtt type must be a subtype of object type");
return 0;
}
Control* c = control_at(branch_depth.depth);
Value* result_on_branch =
- Push(ValueType::Ref(rtt.type.heap_type(), kNonNullable));
+ Push(rtt.type.is_bottom()
+ ? kWasmBottom
+ : ValueType::Ref(rtt.type.heap_type(), kNonNullable));
TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
if (V8_LIKELY(check_result == kReachableBranch)) {
CALL_INTERFACE(BrOnCast, obj, rtt, result_on_branch,
@@ -3858,19 +4010,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Pop(0); // Drop {result_on_branch}, restore original value.
Value* result_on_fallthrough = Push(obj.type);
*result_on_fallthrough = obj;
- return 2 + branch_depth.length;
+ return opcode_length + branch_depth.length;
}
default:
- this->error("invalid gc opcode");
+ this->DecodeError("invalid gc opcode");
return 0;
}
}
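Throughout these hunks the fixed byte count 2 (one prefix byte plus a one-byte sub-opcode) is replaced by the {opcode_length} supplied by the caller, so the decoder keeps working if sub-opcodes become multi-byte. A minimal standalone sketch of how such a length could be computed, assuming only the Wasm convention that a prefixed opcode is a one-byte prefix followed by an unsigned LEB128 sub-opcode (this is not V8's implementation):

#include <cstddef>
#include <cstdint>

// Sketch: length in bytes of "prefix byte + LEB128-encoded sub-opcode", i.e.
// the quantity that plays the role of {opcode_length} above. Assumes {pc}
// points at the prefix byte and {end} is one past the last valid byte.
size_t PrefixedOpcodeLength(const uint8_t* pc, const uint8_t* end) {
  size_t length = 1;  // the prefix byte itself (e.g. the GC or SIMD prefix)
  for (const uint8_t* p = pc + 1; p < end; ++p) {
    ++length;
    if ((*p & 0x80) == 0) break;  // the last LEB128 byte has the high bit clear
  }
  return length;
}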
- uint32_t DecodeAtomicOpcode(WasmOpcode opcode) {
+ uint32_t DecodeAtomicOpcode(WasmOpcode opcode, uint32_t opcode_length) {
ValueType ret_type;
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!VALIDATE(sig != nullptr)) {
- this->error("invalid atomic opcode");
+ this->DecodeError("invalid atomic opcode");
return 0;
}
MachineType memtype;
@@ -3892,31 +4044,37 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ATOMIC_OP_LIST(CASE_ATOMIC_OP)
#undef CASE_ATOMIC_OP
case kExprAtomicFence: {
- byte zero = this->template read_u8<validate>(this->pc_ + 2, "zero");
+ byte zero =
+ this->template read_u8<validate>(this->pc_ + opcode_length, "zero");
if (!VALIDATE(zero == 0)) {
- this->error(this->pc_ + 2, "invalid atomic operand");
+ this->DecodeError(this->pc_ + opcode_length,
+ "invalid atomic operand");
return 0;
}
CALL_INTERFACE_IF_REACHABLE(AtomicFence);
- return 3;
+ return 1 + opcode_length;
}
default:
- this->error("invalid atomic opcode");
+ this->DecodeError("invalid atomic opcode");
return 0;
}
- if (!CheckHasMemoryForAtomics()) return 0;
+ if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(
- this, this->pc_ + 2, ElementSizeLog2Of(memtype.representation()));
+ this, this->pc_ + opcode_length,
+ ElementSizeLog2Of(memtype.representation()));
+ // TODO(10949): Fix this for memory64 (index type should be kWasmI64
+ // then).
+ CHECK(!this->module_->is_memory64);
ArgVector args = PopArgs(sig);
Value* result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm, result);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
- unsigned DecodeNumericOpcode(WasmOpcode opcode) {
+ unsigned DecodeNumericOpcode(WasmOpcode opcode, uint32_t opcode_length) {
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!VALIDATE(sig != nullptr)) {
- this->error("invalid numeric opcode");
+ this->DecodeError("invalid numeric opcode");
return 0;
}
switch (opcode) {
@@ -3927,88 +4085,90 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprI64SConvertSatF32:
case kExprI64UConvertSatF32:
case kExprI64SConvertSatF64:
- case kExprI64UConvertSatF64:
- return 1 + BuildSimpleOperator(opcode, sig);
+ case kExprI64UConvertSatF64: {
+ BuildSimpleOperator(opcode, sig);
+ return opcode_length;
+ }
case kExprMemoryInit: {
- MemoryInitImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ MemoryInitImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value size = Pop(2, sig->GetParam(2));
Value src = Pop(1, sig->GetParam(1));
Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryInit, imm, dst, src, size);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprDataDrop: {
- DataDropImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ DataDropImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
CALL_INTERFACE_IF_REACHABLE(DataDrop, imm);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprMemoryCopy: {
- MemoryCopyImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ MemoryCopyImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value size = Pop(2, sig->GetParam(2));
Value src = Pop(1, sig->GetParam(1));
Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryCopy, imm, dst, src, size);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprMemoryFill: {
- MemoryIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ MemoryIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value size = Pop(2, sig->GetParam(2));
Value value = Pop(1, sig->GetParam(1));
Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryFill, imm, dst, value, size);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableInit: {
- TableInitImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableInitImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ArgVector args = PopArgs(sig);
CALL_INTERFACE_IF_REACHABLE(TableInit, imm, VectorOf(args));
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprElemDrop: {
- ElemDropImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ElemDropImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
CALL_INTERFACE_IF_REACHABLE(ElemDrop, imm);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableCopy: {
- TableCopyImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableCopyImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ArgVector args = PopArgs(sig);
CALL_INTERFACE_IF_REACHABLE(TableCopy, imm, VectorOf(args));
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableGrow: {
- TableIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value delta = Pop(1, sig->GetParam(1));
Value value = Pop(0, this->module_->tables[imm.index].type);
Value* result = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(TableGrow, imm, value, delta, result);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableSize: {
- TableIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value* result = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(TableSize, imm, result);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableFill: {
- TableIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value count = Pop(2, sig->GetParam(2));
Value value = Pop(1, this->module_->tables[imm.index].type);
Value start = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(TableFill, imm, start, value, count);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
default:
- this->error("invalid numeric opcode");
+ this->DecodeError("invalid numeric opcode");
return 0;
}
}
@@ -4087,15 +4247,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// size increase. Not inlining them should not create a performance
// degradation, because their invocations are guarded by V8_LIKELY.
V8_NOINLINE void PopTypeError(int index, Value val, ValueType expected) {
- this->errorf(val.pc, "%s[%d] expected type %s, found %s of type %s",
- SafeOpcodeNameAt(this->pc_), index, expected.name().c_str(),
- SafeOpcodeNameAt(val.pc), val.type.name().c_str());
+ this->DecodeError(val.pc(), "%s[%d] expected type %s, found %s of type %s",
+ SafeOpcodeNameAt(this->pc_), index,
+ expected.name().c_str(), SafeOpcodeNameAt(val.pc()),
+ val.type.name().c_str());
}
V8_NOINLINE void NotEnoughArgumentsError(int index) {
- this->errorf(this->pc_,
- "not enough arguments on the stack for %s, expected %d more",
- SafeOpcodeNameAt(this->pc_), index + 1);
+ this->DecodeError(
+ "not enough arguments on the stack for %s, expected %d more",
+ SafeOpcodeNameAt(this->pc_), index + 1);
}
V8_INLINE Value Pop(int index, ValueType expected) {
@@ -4133,6 +4294,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int index_offset = conditional_branch ? 1 : 0;
for (int i = arity - 1; i >= 0; --i) Pop(index_offset + i, merge[i].type);
// Push values of the correct type back on the stack.
+ EnsureStackSpace(arity);
for (int i = 0; i < arity; ++i) Push(merge[i].type);
return this->ok();
}
@@ -4162,8 +4324,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value& val = stack_values[i];
Value& old = (*merge)[i];
if (!VALIDATE(IsSubtypeOf(val.type, old.type, this->module_))) {
- this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
- i, old.type.name().c_str(), val.type.name().c_str());
+ this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
+ old.type.name().c_str(), val.type.name().c_str());
return false;
}
}
@@ -4179,8 +4341,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value& start = c->start_merge[i];
Value& end = c->end_merge[i];
if (!VALIDATE(IsSubtypeOf(start.type, end.type, this->module_))) {
- this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
- i, end.type.name().c_str(), start.type.name().c_str());
+ this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
+ end.type.name().c_str(), start.type.name().c_str());
return false;
}
}
@@ -4197,10 +4359,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t actual = stack_size() - c.stack_depth;
// Fallthrus must match the arity of the control exactly.
if (!VALIDATE(actual == expected)) {
- this->errorf(
- this->pc_,
+ this->DecodeError(
"expected %u elements on the stack for fallthru to @%d, found %u",
- expected, startrel(c.pc), actual);
+ expected, startrel(c.pc()), actual);
return false;
}
if (expected == 0) return true; // Fast path.
@@ -4216,10 +4377,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int available = static_cast<int>(stack_size()) - c.stack_depth;
// For fallthrus, not more than the needed values should be available.
if (!VALIDATE(available <= arity)) {
- this->errorf(
- this->pc_,
+ this->DecodeError(
"expected %u elements on the stack for fallthru to @%d, found %u",
- arity, startrel(c.pc), available);
+ arity, startrel(c.pc()), available);
return false;
}
// Pop all values from the stack for type checking of existing stack
@@ -4246,10 +4406,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t actual =
static_cast<uint32_t>(stack_size()) - control_.back().stack_depth;
if (!VALIDATE(actual >= expected)) {
- this->errorf(
- this->pc_,
+ this->DecodeError(
"expected %u elements on the stack for br to @%d, found %u",
- expected, startrel(c->pc), actual);
+ expected, startrel(c->pc()), actual);
return kInvalidStack;
}
return TypeCheckMergeValues(c, c->br_merge()) ? kReachableBranch
@@ -4270,9 +4429,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int num_available =
static_cast<int>(stack_size()) - control_.back().stack_depth;
if (!VALIDATE(num_available >= num_returns)) {
- this->errorf(this->pc_,
- "expected %u elements on the stack for return, found %u",
- num_returns, num_available);
+ this->DecodeError(
+ "expected %u elements on the stack for return, found %u", num_returns,
+ num_available);
return false;
}
@@ -4283,9 +4442,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value& val = stack_values[i];
ValueType expected_type = this->sig_->GetReturn(i);
if (!VALIDATE(IsSubtypeOf(val.type, expected_type, this->module_))) {
- this->errorf(this->pc_,
- "type error in return[%u] (expected %s, got %s)", i,
- expected_type.name().c_str(), val.type.name().c_str());
+ this->DecodeError("type error in return[%u] (expected %s, got %s)", i,
+ expected_type.name().c_str(),
+ val.type.name().c_str());
return false;
}
}
@@ -4350,9 +4509,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
class EmptyInterface {
public:
- static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
- using Value = ValueBase;
- using Control = ControlBase<Value>;
+ static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
+ using Value = ValueBase<validate>;
+ using Control = ControlBase<Value, validate>;
using FullDecoder = WasmFullDecoder<validate, EmptyInterface>;
#define DEFINE_EMPTY_CALLBACK(name, ...) \
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index a7471c3a7b..77c84bd615 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -23,8 +23,8 @@ bool DecodeLocalDecls(const WasmFeatures& enabled, BodyLocalDecls* decls,
const byte* start, const byte* end) {
WasmFeatures no_features = WasmFeatures::None();
Zone* zone = decls->type_list.get_allocator().zone();
- WasmDecoder<Decoder::kValidate> decoder(zone, nullptr, enabled, &no_features,
- nullptr, start, end, 0);
+ WasmDecoder<Decoder::kFullValidation> decoder(
+ zone, nullptr, enabled, &no_features, nullptr, start, end, 0);
uint32_t length;
if (!decoder.DecodeLocals(decoder.pc(), &length, 0)) {
decls->encoded_size = 0;
@@ -54,7 +54,7 @@ DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmModule* module, WasmFeatures* detected,
const FunctionBody& body) {
Zone zone(allocator, ZONE_NAME);
- WasmFullDecoder<Decoder::kValidate, EmptyInterface> decoder(
+ WasmFullDecoder<Decoder::kFullValidation, EmptyInterface> decoder(
&zone, module, enabled, detected, body);
decoder.Decode();
return decoder.toResult(nullptr);
@@ -65,9 +65,9 @@ unsigned OpcodeLength(const byte* pc, const byte* end) {
Zone* no_zone = nullptr;
WasmModule* no_module = nullptr;
FunctionSig* no_sig = nullptr;
- WasmDecoder<Decoder::kNoValidate> decoder(no_zone, no_module, no_features,
- &no_features, no_sig, pc, end, 0);
- return WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, pc);
+ WasmDecoder<Decoder::kNoValidation> decoder(no_zone, no_module, no_features,
+ &no_features, no_sig, pc, end, 0);
+ return WasmDecoder<Decoder::kNoValidation>::OpcodeLength(&decoder, pc);
}
std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
@@ -75,7 +75,7 @@ std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
const byte* pc, const byte* end) {
WasmFeatures unused_detected_features = WasmFeatures::None();
Zone* no_zone = nullptr;
- WasmDecoder<Decoder::kNoValidate> decoder(
+ WasmDecoder<Decoder::kNoValidation> decoder(
no_zone, module, WasmFeatures::All(), &unused_detected_features, sig, pc,
end);
return decoder.StackEffect(pc);
@@ -124,9 +124,9 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
std::ostream& os, std::vector<int>* line_numbers) {
Zone zone(allocator, ZONE_NAME);
WasmFeatures unused_detected_features = WasmFeatures::None();
- WasmDecoder<Decoder::kNoValidate> decoder(&zone, module, WasmFeatures::All(),
- &unused_detected_features, body.sig,
- body.start, body.end);
+ WasmDecoder<Decoder::kNoValidation> decoder(
+ &zone, module, WasmFeatures::All(), &unused_detected_features, body.sig,
+ body.start, body.end);
int line_nr = 0;
constexpr int kNoByteCode = -1;
@@ -174,7 +174,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
unsigned control_depth = 0;
for (; i.has_next(); i.next()) {
unsigned length =
- WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, i.pc());
+ WasmDecoder<Decoder::kNoValidation>::OpcodeLength(&decoder, i.pc());
unsigned offset = 1;
WasmOpcode opcode = i.current();
@@ -243,8 +243,8 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
case kExprIf:
case kExprBlock:
case kExprTry: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc() + 1);
+ BlockTypeImmediate<Decoder::kNoValidation> imm(WasmFeatures::All(), &i,
+ i.pc() + 1);
os << " @" << i.pc_offset();
if (decoder.Complete(imm)) {
for (uint32_t i = 0; i < imm.out_arity(); i++) {
@@ -259,23 +259,23 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
control_depth--;
break;
case kExprBr: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
os << " depth=" << imm.depth;
break;
}
case kExprBrIf: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
os << " depth=" << imm.depth;
break;
}
case kExprBrTable: {
- BranchTableImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ BranchTableImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
os << " entries=" << imm.table_count;
break;
}
case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc() + 1);
+ CallIndirectImmediate<Decoder::kNoValidation> imm(WasmFeatures::All(),
+ &i, i.pc() + 1);
os << " sig #" << imm.sig_index;
if (decoder.Complete(imm)) {
os << ": " << *imm.sig;
@@ -283,7 +283,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
break;
}
case kExprCallFunction: {
- CallFunctionImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ CallFunctionImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
os << " function #" << imm.index;
if (decoder.Complete(imm)) {
os << ": " << *imm.sig;
@@ -304,9 +304,9 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
const byte* start, const byte* end) {
WasmFeatures no_features = WasmFeatures::None();
- WasmDecoder<Decoder::kValidate> decoder(zone, nullptr, no_features,
- &no_features, nullptr, start, end, 0);
- return WasmDecoder<Decoder::kValidate>::AnalyzeLoopAssignment(
+ WasmDecoder<Decoder::kFullValidation> decoder(
+ zone, nullptr, no_features, &no_features, nullptr, start, end, 0);
+ return WasmDecoder<Decoder::kFullValidation>::AnalyzeLoopAssignment(
&decoder, start, static_cast<uint32_t>(num_locals), zone);
}
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 6bc626cb18..d3144c9e46 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -163,7 +163,7 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
WasmOpcode current() {
return static_cast<WasmOpcode>(
- read_u8<Decoder::kNoValidate>(pc_, "expected bytecode"));
+ read_u8<Decoder::kNoValidation>(pc_, "expected bytecode"));
}
void next() {
@@ -176,7 +176,7 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
bool has_next() { return pc_ < end_; }
WasmOpcode prefixed_opcode() {
- return read_prefixed_opcode<Decoder::kNoValidate>(pc_);
+ return read_prefixed_opcode<Decoder::kNoValidation>(pc_);
}
};
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 8b41a90992..0e4135f03a 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -267,7 +267,6 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
namespace {
bool UseGenericWrapper(const FunctionSig* sig) {
-// Work only for int32 parameters and 1 or 0 return value for now.
#if V8_TARGET_ARCH_X64
if (sig->returns().size() > 1) {
return false;
@@ -295,10 +294,11 @@ bool UseGenericWrapper(const FunctionSig* sig) {
JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(
Isolate* isolate, WasmEngine* wasm_engine, const FunctionSig* sig,
const WasmModule* module, bool is_import,
- const WasmFeatures& enabled_features)
+ const WasmFeatures& enabled_features, AllowGeneric allow_generic)
: is_import_(is_import),
sig_(sig),
- use_generic_wrapper_(UseGenericWrapper(sig) && !is_import),
+ use_generic_wrapper_(allow_generic && UseGenericWrapper(sig) &&
+ !is_import),
job_(use_generic_wrapper_ ? nullptr
: compiler::NewJSToWasmCompilationJob(
isolate, wasm_engine, sig, module,
@@ -339,7 +339,21 @@ Handle<Code> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
// Run the compilation unit synchronously.
WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
JSToWasmWrapperCompilationUnit unit(isolate, isolate->wasm_engine(), sig,
- module, is_import, enabled_features);
+ module, is_import, enabled_features,
+ kAllowGeneric);
+ unit.Execute();
+ return unit.Finalize(isolate);
+}
+
+// static
+Handle<Code> JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
+ Isolate* isolate, const FunctionSig* sig, const WasmModule* module) {
+ // Run the compilation unit synchronously.
+ const bool is_import = false;
+ WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
+ JSToWasmWrapperCompilationUnit unit(isolate, isolate->wasm_engine(), sig,
+ module, is_import, enabled_features,
+ kDontAllowGeneric);
unit.Execute();
return unit.Finalize(isolate);
}
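The new {AllowGeneric} flag (declared in the header change below) makes the two entry points explicit: the existing helper keeps passing kAllowGeneric and may return the generic builtin wrapper when {UseGenericWrapper} accepts the signature, while {CompileSpecificJSToWasmWrapper} passes kDontAllowGeneric and always compiles a dedicated wrapper. A minimal usage sketch, assuming an Isolate*, a FunctionSig* and a WasmModule* are already in scope:

// Sketch only: force compilation of a signature-specific JS-to-Wasm wrapper,
// bypassing the generic builtin wrapper even if the signature would qualify.
Handle<Code> wrapper =
    JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
        isolate, sig, module);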
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index 3d232773e3..4894076303 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -32,6 +32,8 @@ struct WasmFunction;
class WasmInstructionBuffer final {
public:
WasmInstructionBuffer() = delete;
+ WasmInstructionBuffer(const WasmInstructionBuffer&) = delete;
+ WasmInstructionBuffer& operator=(const WasmInstructionBuffer&) = delete;
~WasmInstructionBuffer();
std::unique_ptr<AssemblerBuffer> CreateView();
std::unique_ptr<uint8_t[]> ReleaseBuffer();
@@ -43,9 +45,6 @@ class WasmInstructionBuffer final {
// Override {operator delete} to avoid implicit instantiation of {operator
// delete} with {size_t} argument. The {size_t} argument would be incorrect.
void operator delete(void* ptr) { ::operator delete(ptr); }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(WasmInstructionBuffer);
};
struct WasmCompilationResult {
@@ -113,10 +112,15 @@ STATIC_ASSERT(sizeof(WasmCompilationUnit) <= 2 * kSystemPointerSize);
class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
public:
+ // A flag to mark whether the compilation unit can skip the compilation
+ // and return the builtin (generic) wrapper, when available.
+ enum AllowGeneric : bool { kAllowGeneric = true, kDontAllowGeneric = false };
+
JSToWasmWrapperCompilationUnit(Isolate* isolate, WasmEngine* wasm_engine,
const FunctionSig* sig,
const wasm::WasmModule* module, bool is_import,
- const WasmFeatures& enabled_features);
+ const WasmFeatures& enabled_features,
+ AllowGeneric allow_generic);
~JSToWasmWrapperCompilationUnit();
void Execute();
@@ -131,6 +135,12 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
const WasmModule* module,
bool is_import);
+ // Run a compilation unit synchronously, but ask for the specific
+ // wrapper.
+ static Handle<Code> CompileSpecificJSToWasmWrapper(Isolate* isolate,
+ const FunctionSig* sig,
+ const WasmModule* module);
+
private:
bool is_import_;
const FunctionSig* sig_;
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 3fc6b066bb..ea071df575 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -74,11 +74,11 @@ constexpr uint32_t kNullCatch = static_cast<uint32_t>(-1);
class WasmGraphBuildingInterface {
public:
- static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
+ static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
using FullDecoder = WasmFullDecoder<validate, WasmGraphBuildingInterface>;
using CheckForNull = compiler::WasmGraphBuilder::CheckForNull;
- struct Value : public ValueBase {
+ struct Value : public ValueBase<validate> {
TFNode* node = nullptr;
template <typename... Args>
@@ -97,7 +97,7 @@ class WasmGraphBuildingInterface {
explicit TryInfo(SsaEnv* c) : catch_env(c) {}
};
- struct Control : public ControlBase<Value> {
+ struct Control : public ControlBase<Value, validate> {
SsaEnv* end_env = nullptr; // end environment for the construct.
SsaEnv* false_env = nullptr; // false environment (only for if).
TryInfo* try_info = nullptr; // information about try statements.
@@ -436,6 +436,13 @@ class WasmGraphBuildingInterface {
index.node, imm.offset, imm.alignment, decoder->position());
}
+ void LoadLane(FullDecoder* decoder, LoadType type, const Value& value,
+ const Value& index, const MemoryAccessImmediate<validate>& imm,
+ const uint8_t laneidx, Value* result) {
+ result->node = BUILD(LoadLane, type.mem_type(), value.node, index.node,
+ imm.offset, laneidx, decoder->position());
+ }
+
void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm, const Value& index,
const Value& value) {
@@ -443,6 +450,13 @@ class WasmGraphBuildingInterface {
value.node, decoder->position(), type.value_type());
}
+ void StoreLane(FullDecoder* decoder, StoreType type,
+ const MemoryAccessImmediate<validate>& imm, const Value& index,
+ const Value& value, const uint8_t laneidx) {
+ BUILD(StoreLane, type.mem_rep(), index.node, imm.offset, imm.alignment,
+ value.node, laneidx, decoder->position(), type.value_type());
+ }
+
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
result->node = BUILD(CurrentMemoryPages);
}
@@ -1071,33 +1085,20 @@ class WasmGraphBuildingInterface {
BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment(
decoder, decoder->pc(), decoder->num_locals() + 1, decoder->zone());
if (decoder->failed()) return;
- if (assigned != nullptr) {
- // Only introduce phis for variables assigned in this loop.
- int instance_cache_index = decoder->num_locals();
- for (int i = decoder->num_locals() - 1; i >= 0; i--) {
- if (!assigned->Contains(i)) continue;
- TFNode* inputs[] = {ssa_env_->locals[i], control()};
- ssa_env_->locals[i] = builder_->Phi(decoder->local_type(i), 1, inputs);
- }
- // Introduce phis for instance cache pointers if necessary.
- if (assigned->Contains(instance_cache_index)) {
- builder_->PrepareInstanceCacheForLoop(&ssa_env_->instance_cache,
- control());
- }
+ DCHECK_NOT_NULL(assigned);
- SetEnv(Split(decoder->zone(), ssa_env_));
- builder_->StackCheck(decoder->position());
- return;
- }
-
- // Conservatively introduce phis for all local variables.
+ // Only introduce phis for variables assigned in this loop.
+ int instance_cache_index = decoder->num_locals();
for (int i = decoder->num_locals() - 1; i >= 0; i--) {
+ if (!assigned->Contains(i)) continue;
TFNode* inputs[] = {ssa_env_->locals[i], control()};
ssa_env_->locals[i] = builder_->Phi(decoder->local_type(i), 1, inputs);
}
-
- // Conservatively introduce phis for instance cache.
- builder_->PrepareInstanceCacheForLoop(&ssa_env_->instance_cache, control());
+ // Introduce phis for instance cache pointers if necessary.
+ if (assigned->Contains(instance_cache_index)) {
+ builder_->PrepareInstanceCacheForLoop(&ssa_env_->instance_cache,
+ control());
+ }
SetEnv(Split(decoder->zone(), ssa_env_));
builder_->StackCheck(decoder->position());
@@ -1200,7 +1201,7 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
WasmFeatures* detected, const FunctionBody& body,
compiler::NodeOriginTable* node_origins) {
Zone zone(allocator, ZONE_NAME);
- WasmFullDecoder<Decoder::kValidate, WasmGraphBuildingInterface> decoder(
+ WasmFullDecoder<Decoder::kFullValidation, WasmGraphBuildingInterface> decoder(
&zone, module, enabled, detected, body, builder);
if (node_origins) {
builder->AddBytecodePositionDecorator(node_origins, &decoder);
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index 075a6e2f25..0d88c4b461 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -19,14 +19,13 @@ void TraceMemoryOperation(base::Optional<ExecutionTier> tier,
int position, uint8_t* mem_start) {
EmbeddedVector<char, 91> value;
auto mem_rep = static_cast<MachineRepresentation>(info->mem_rep);
+ Address address = reinterpret_cast<Address>(mem_start) + info->offset;
switch (mem_rep) {
-#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
- case MachineRepresentation::rep: \
- SNPrintF(value, str ":" format, \
- base::ReadLittleEndianValue<ctype1>( \
- reinterpret_cast<Address>(mem_start) + info->address), \
- base::ReadLittleEndianValue<ctype2>( \
- reinterpret_cast<Address>(mem_start) + info->address)); \
+#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
+ case MachineRepresentation::rep: \
+ SNPrintF(value, str ":" format, \
+ base::ReadLittleEndianValue<ctype1>(address), \
+ base::ReadLittleEndianValue<ctype2>(address)); \
break;
TRACE_TYPE(kWord8, " i8", "%d / %02x", uint8_t, uint8_t)
TRACE_TYPE(kWord16, "i16", "%d / %04x", uint16_t, uint16_t)
@@ -37,30 +36,22 @@ void TraceMemoryOperation(base::Optional<ExecutionTier> tier,
#undef TRACE_TYPE
case MachineRepresentation::kSimd128:
SNPrintF(value, "s128:%d %d %d %d / %08x %08x %08x %08x",
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 4),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 8),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 12),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 4),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 8),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 12));
+ base::ReadLittleEndianValue<uint32_t>(address),
+ base::ReadLittleEndianValue<uint32_t>(address + 4),
+ base::ReadLittleEndianValue<uint32_t>(address + 8),
+ base::ReadLittleEndianValue<uint32_t>(address + 12),
+ base::ReadLittleEndianValue<uint32_t>(address),
+ base::ReadLittleEndianValue<uint32_t>(address + 4),
+ base::ReadLittleEndianValue<uint32_t>(address + 8),
+ base::ReadLittleEndianValue<uint32_t>(address + 12));
break;
default:
SNPrintF(value, "???");
}
const char* eng =
tier.has_value() ? ExecutionTierToString(tier.value()) : "?";
- printf("%-11s func:%6d+0x%-6x%s %08x val: %s\n", eng, func_index, position,
- info->is_store ? " store to" : "load from", info->address,
+ printf("%-11s func:%6d+0x%-6x%s %016" PRIuPTR " val: %s\n", eng, func_index,
+ position, info->is_store ? " store to" : "load from", info->offset,
value.begin());
}
diff --git a/deps/v8/src/wasm/memory-tracing.h b/deps/v8/src/wasm/memory-tracing.h
index ca1b2f38c4..f025f07ded 100644
--- a/deps/v8/src/wasm/memory-tracing.h
+++ b/deps/v8/src/wasm/memory-tracing.h
@@ -17,7 +17,7 @@ namespace wasm {
 // This struct is created in generated code, hence use low-level types.
struct MemoryTracingInfo {
- uint32_t address;
+ uintptr_t offset;
uint8_t is_store; // 0 or 1
uint8_t mem_rep;
static_assert(
@@ -25,8 +25,10 @@ struct MemoryTracingInfo {
std::underlying_type<MachineRepresentation>::type>::value,
"MachineRepresentation uses uint8_t");
- MemoryTracingInfo(uint32_t addr, bool is_store, MachineRepresentation rep)
- : address(addr), is_store(is_store), mem_rep(static_cast<uint8_t>(rep)) {}
+ MemoryTracingInfo(uintptr_t offset, bool is_store, MachineRepresentation rep)
+ : offset(offset),
+ is_store(is_store),
+ mem_rep(static_cast<uint8_t>(rep)) {}
};
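With this struct change the tracer carries a memory offset instead of a 32-bit absolute address, and {TraceMemoryOperation} adds it to {mem_start} itself (see the .cc hunk above). A small construction sketch, with illustrative values only:

// Sketch: record a 4-byte store at byte offset 0x20 into linear memory.
MemoryTracingInfo info(/*offset=*/uintptr_t{0x20}, /*is_store=*/true,
                       MachineRepresentation::kWord32);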
// Callback for tracing a memory operation for debugging.
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 967e092b5b..82f86786a7 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -79,105 +79,24 @@ enum class CompileStrategy : uint8_t {
kDefault = kEager,
};
-// Background compile jobs hold a shared pointer to this token. The token is
-// used to notify them that they should stop. As soon as they see this (after
-// finishing their current compilation unit), they will stop.
-// This allows to already remove the NativeModule without having to synchronize
-// on background compile jobs.
-class BackgroundCompileToken {
- public:
- explicit BackgroundCompileToken(
- const std::shared_ptr<NativeModule>& native_module)
- : native_module_(native_module) {}
-
- void Cancel() {
- base::SharedMutexGuard<base::kExclusive> mutex_guard(
- &compilation_scope_mutex_);
- native_module_.reset();
- }
-
- private:
- friend class BackgroundCompileScope;
-
- std::shared_ptr<NativeModule> StartScope() {
- compilation_scope_mutex_.LockShared();
- return native_module_.lock();
- }
-
- // This private method can only be called via {BackgroundCompileScope}.
- void SchedulePublishCode(NativeModule* native_module,
- std::vector<std::unique_ptr<WasmCode>> codes) {
- {
- base::MutexGuard guard(&publish_mutex_);
- if (publisher_running_) {
- // Add new code to the queue and return.
- publish_queue_.reserve(publish_queue_.size() + codes.size());
- for (auto& c : codes) publish_queue_.emplace_back(std::move(c));
- return;
- }
- publisher_running_ = true;
- }
- while (true) {
- PublishCode(native_module, VectorOf(codes));
- codes.clear();
-
- // Keep publishing new code that came in.
- base::MutexGuard guard(&publish_mutex_);
- DCHECK(publisher_running_);
- if (publish_queue_.empty()) {
- publisher_running_ = false;
- return;
- }
- codes.swap(publish_queue_);
- }
- }
-
- void PublishCode(NativeModule*, Vector<std::unique_ptr<WasmCode>>);
-
- void ExitScope() { compilation_scope_mutex_.UnlockShared(); }
-
- // {compilation_scope_mutex_} protects {native_module_}.
- base::SharedMutex compilation_scope_mutex_;
- std::weak_ptr<NativeModule> native_module_;
-
- // {publish_mutex_} protects {publish_queue_} and {publisher_running_}.
- base::Mutex publish_mutex_;
- std::vector<std::unique_ptr<WasmCode>> publish_queue_;
- bool publisher_running_ = false;
-};
-
class CompilationStateImpl;
-// Keep these scopes short, as they hold the mutex of the token, which
-// sequentializes all these scopes. The mutex is also acquired from foreground
-// tasks, which should not be blocked for a long time.
class BackgroundCompileScope {
public:
- explicit BackgroundCompileScope(
- const std::shared_ptr<BackgroundCompileToken>& token)
- : token_(token.get()), native_module_(token->StartScope()) {}
-
- ~BackgroundCompileScope() { token_->ExitScope(); }
-
- bool cancelled() const { return native_module_ == nullptr; }
+ explicit BackgroundCompileScope(std::weak_ptr<NativeModule> native_module)
+ : native_module_(native_module.lock()) {}
- NativeModule* native_module() {
- DCHECK(!cancelled());
+ NativeModule* native_module() const {
+ DCHECK(native_module_);
return native_module_.get();
}
+ inline CompilationStateImpl* compilation_state() const;
- inline CompilationStateImpl* compilation_state();
-
- // Call {SchedulePublishCode} via the {BackgroundCompileScope} to guarantee
- // that the {NativeModule} stays alive.
- void SchedulePublishCode(std::vector<std::unique_ptr<WasmCode>> codes) {
- token_->SchedulePublishCode(native_module_.get(), std::move(codes));
- }
+ bool cancelled() const;
private:
- BackgroundCompileToken* const token_;
// Keep the native module alive while in this scope.
- std::shared_ptr<NativeModule> const native_module_;
+ std::shared_ptr<NativeModule> native_module_;
};
enum CompileBaselineOnly : bool {
@@ -190,33 +109,74 @@ enum CompileBaselineOnly : bool {
// runs empty.
class CompilationUnitQueues {
public:
- explicit CompilationUnitQueues(int max_tasks, int num_declared_functions)
- : queues_(max_tasks), top_tier_priority_units_queues_(max_tasks) {
- DCHECK_LT(0, max_tasks);
- for (int task_id = 0; task_id < max_tasks; ++task_id) {
- queues_[task_id].next_steal_task_id = next_task_id(task_id);
- }
+ // Public API for QueueImpl.
+ struct Queue {
+ bool ShouldPublish(int num_processed_units) const;
+ };
+
+ explicit CompilationUnitQueues(int num_declared_functions)
+ : num_declared_functions_(num_declared_functions) {
+    // Add an initial queue to which units are added.
+ queues_.emplace_back(std::make_unique<QueueImpl>(0));
+
for (auto& atomic_counter : num_units_) {
std::atomic_init(&atomic_counter, size_t{0});
}
- treated_ = std::make_unique<std::atomic<bool>[]>(num_declared_functions);
+ top_tier_compiled_ =
+ std::make_unique<std::atomic<bool>[]>(num_declared_functions);
for (int i = 0; i < num_declared_functions; i++) {
- std::atomic_init(&treated_.get()[i], false);
+ std::atomic_init(&top_tier_compiled_.get()[i], false);
}
}
- base::Optional<WasmCompilationUnit> GetNextUnit(
- int task_id, CompileBaselineOnly baseline_only) {
- DCHECK_LE(0, task_id);
- DCHECK_GT(queues_.size(), task_id);
+ Queue* GetQueueForTask(int task_id) {
+ int required_queues = task_id + 1;
+ {
+ base::SharedMutexGuard<base::kShared> queues_guard(&queues_mutex_);
+ if (V8_LIKELY(static_cast<int>(queues_.size()) >= required_queues)) {
+ return queues_[task_id].get();
+ }
+ }
+
+ // Otherwise increase the number of queues.
+ base::SharedMutexGuard<base::kExclusive> queues_guard(&queues_mutex_);
+ int num_queues = static_cast<int>(queues_.size());
+ while (num_queues < required_queues) {
+ int steal_from = num_queues + 1;
+ queues_.emplace_back(std::make_unique<QueueImpl>(steal_from));
+ ++num_queues;
+ }
+
+ // Update the {publish_limit}s of all queues.
+
+ // We want background threads to publish regularly (to avoid contention when
+      // they are all publishing at the end). On the other hand, each publish
+      // has some overhead (partly for synchronizing between threads), so it
+ // should not happen *too* often. Thus aim for 4-8 publishes per thread, but
+ // distribute it such that publishing is likely to happen at different
+ // times.
+ int units_per_thread = num_declared_functions_ / num_queues;
+ int min = std::max(10, units_per_thread / 8);
+ int queue_id = 0;
+ for (auto& queue : queues_) {
+ // Set a limit between {min} and {2*min}, but not smaller than {10}.
+ int limit = min + (min * queue_id / num_queues);
+ queue->publish_limit.store(limit, std::memory_order_relaxed);
+ ++queue_id;
+ }
+
+ return queues_[task_id].get();
+ }
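To make the limit heuristic above concrete (numbers are illustrative, not taken from the patch): with 1600 declared functions and 4 queues, {units_per_thread} is 400, {min} is max(10, 400 / 8) = 50, and the per-queue limits come out as 50, 62, 75 and 87, i.e. roughly 4-8 publishes per thread, staggered so the threads rarely publish at the same moment. The same arithmetic as a standalone sketch:

#include <algorithm>
#include <cstdio>

int main() {
  const int num_declared_functions = 1600;  // assumed example value
  const int num_queues = 4;                 // assumed example value
  const int units_per_thread = num_declared_functions / num_queues;  // 400
  const int min = std::max(10, units_per_thread / 8);                // 50
  for (int queue_id = 0; queue_id < num_queues; ++queue_id) {
    // Limits between {min} and {2*min}: 50, 62, 75, 87 for this example.
    const int limit = min + (min * queue_id / num_queues);
    std::printf("queue %d publishes every %d units\n", queue_id, limit);
  }
  return 0;
}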
+ base::Optional<WasmCompilationUnit> GetNextUnit(
+ Queue* queue, CompileBaselineOnly baseline_only) {
// As long as any lower-tier units are outstanding we need to steal them
// before executing own higher-tier units.
int max_tier = baseline_only ? kBaseline : kTopTier;
for (int tier = GetLowestTierWithUnits(); tier <= max_tier; ++tier) {
- if (auto unit = GetNextUnitOfTier(task_id, tier)) {
+ if (auto unit = GetNextUnitOfTier(queue, tier)) {
size_t old_units_count =
num_units_[tier].fetch_sub(1, std::memory_order_relaxed);
DCHECK_LE(1, old_units_count);
@@ -233,13 +193,18 @@ class CompilationUnitQueues {
DCHECK_LT(0, baseline_units.size() + top_tier_units.size());
// Add to the individual queues in a round-robin fashion. No special care is
// taken to balance them; they will be balanced by work stealing.
- int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
- while (!next_queue_to_add.compare_exchange_weak(
- queue_to_add, next_task_id(queue_to_add), std::memory_order_relaxed)) {
- // Retry with updated {queue_to_add}.
+ QueueImpl* queue;
+ {
+ int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
+ base::SharedMutexGuard<base::kShared> queues_guard(&queues_mutex_);
+ while (!next_queue_to_add.compare_exchange_weak(
+ queue_to_add, next_task_id(queue_to_add, queues_.size()),
+ std::memory_order_relaxed)) {
+ // Retry with updated {queue_to_add}.
+ }
+ queue = queues_[queue_to_add].get();
}
- Queue* queue = &queues_[queue_to_add];
base::MutexGuard guard(&queue->mutex);
base::Optional<base::MutexGuard> big_units_guard;
for (auto pair : {std::make_pair(int{kBaseline}, baseline_units),
@@ -265,22 +230,24 @@ class CompilationUnitQueues {
}
void AddTopTierPriorityUnit(WasmCompilationUnit unit, size_t priority) {
+ base::SharedMutexGuard<base::kShared> queues_guard(&queues_mutex_);
// Add to the individual queues in a round-robin fashion. No special care is
// taken to balance them; they will be balanced by work stealing. We use
// the same counter for this reason.
int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
while (!next_queue_to_add.compare_exchange_weak(
- queue_to_add, next_task_id(queue_to_add), std::memory_order_relaxed)) {
+ queue_to_add, next_task_id(queue_to_add, queues_.size()),
+ std::memory_order_relaxed)) {
// Retry with updated {queue_to_add}.
}
- TopTierPriorityUnitsQueue* queue =
- &top_tier_priority_units_queues_[queue_to_add];
- base::MutexGuard guard(&queue->mutex);
-
+ {
+ auto* queue = queues_[queue_to_add].get();
+ base::MutexGuard guard(&queue->mutex);
+ queue->top_tier_priority_units.emplace(priority, unit);
+ }
num_priority_units_.fetch_add(1, std::memory_order_relaxed);
num_units_[kTopTier].fetch_add(1, std::memory_order_relaxed);
- queue->units.emplace(priority, unit);
}
// Get the current total number of units in all queues. This is only a
@@ -304,15 +271,6 @@ class CompilationUnitQueues {
// order of their function body size.
static constexpr size_t kBigUnitsLimit = 4096;
- struct Queue {
- base::Mutex mutex;
-
- // Protected by {mutex}:
- std::vector<WasmCompilationUnit> units[kNumTiers];
- int next_steal_task_id;
- // End of fields protected by {mutex}.
- };
-
struct BigUnit {
BigUnit(size_t func_size, WasmCompilationUnit unit)
: func_size{func_size}, unit(unit) {}
@@ -351,28 +309,27 @@ class CompilationUnitQueues {
std::priority_queue<BigUnit> units[kNumTiers];
};
- struct TopTierPriorityUnitsQueue {
+ struct QueueImpl : public Queue {
+ explicit QueueImpl(int next_steal_task_id)
+ : next_steal_task_id(next_steal_task_id) {}
+
+ // Number of units after which the task processing this queue should publish
+ // compilation results. Updated (reduced, using relaxed ordering) when new
+ // queues are allocated. If there is only one thread running, we can delay
+ // publishing arbitrarily.
+ std::atomic<int> publish_limit{kMaxInt};
+
base::Mutex mutex;
- // Protected by {mutex}:
- std::priority_queue<TopTierPriorityUnit> units;
+ // All fields below are protected by {mutex}.
+ std::vector<WasmCompilationUnit> units[kNumTiers];
+ std::priority_queue<TopTierPriorityUnit> top_tier_priority_units;
int next_steal_task_id;
- // End of fields protected by {mutex}.
};
- std::vector<Queue> queues_;
- BigUnitsQueue big_units_queue_;
-
- std::vector<TopTierPriorityUnitsQueue> top_tier_priority_units_queues_;
-
- std::atomic<size_t> num_units_[kNumTiers];
- std::atomic<size_t> num_priority_units_{0};
- std::unique_ptr<std::atomic<bool>[]> treated_;
- std::atomic<int> next_queue_to_add{0};
-
- int next_task_id(int task_id) const {
+ int next_task_id(int task_id, size_t num_queues) const {
int next = task_id + 1;
- return next == static_cast<int>(queues_.size()) ? 0 : next;
+ return next == static_cast<int>(num_queues) ? 0 : next;
}
int GetLowestTierWithUnits() const {
@@ -382,13 +339,13 @@ class CompilationUnitQueues {
return kNumTiers;
}
- base::Optional<WasmCompilationUnit> GetNextUnitOfTier(int task_id, int tier) {
- Queue* queue = &queues_[task_id];
+ base::Optional<WasmCompilationUnit> GetNextUnitOfTier(Queue* public_queue,
+ int tier) {
+ QueueImpl* queue = static_cast<QueueImpl*>(public_queue);
- // First check whether there is a priority unit. Execute that
- // first.
+ // First check whether there is a priority unit. Execute that first.
if (tier == kTopTier) {
- if (auto unit = GetTopTierPriorityUnit(task_id)) {
+ if (auto unit = GetTopTierPriorityUnit(queue)) {
return unit;
}
}
@@ -411,12 +368,16 @@ class CompilationUnitQueues {
// Try to steal from all other queues. If this succeeds, return one of the
// stolen units.
- size_t steal_trials = queues_.size();
- for (; steal_trials > 0;
- --steal_trials, steal_task_id = next_task_id(steal_task_id)) {
- if (steal_task_id == task_id) continue;
- if (auto unit = StealUnitsAndGetFirst(task_id, steal_task_id, tier)) {
- return unit;
+ {
+ base::SharedMutexGuard<base::kShared> guard(&queues_mutex_);
+ for (size_t steal_trials = 0; steal_trials < queues_.size();
+ ++steal_trials, ++steal_task_id) {
+ if (steal_task_id >= static_cast<int>(queues_.size())) {
+ steal_task_id = 0;
+ }
+ if (auto unit = StealUnitsAndGetFirst(queue, steal_task_id, tier)) {
+ return unit;
+ }
}
}
@@ -425,7 +386,7 @@ class CompilationUnitQueues {
}
base::Optional<WasmCompilationUnit> GetBigUnitOfTier(int tier) {
- // Fast-path without locking.
+ // Fast path without locking.
if (!big_units_queue_.has_units[tier].load(std::memory_order_relaxed)) {
return {};
}
@@ -439,25 +400,22 @@ class CompilationUnitQueues {
return unit;
}
- base::Optional<WasmCompilationUnit> GetTopTierPriorityUnit(int task_id) {
- // Fast-path without locking.
+ base::Optional<WasmCompilationUnit> GetTopTierPriorityUnit(QueueImpl* queue) {
+ // Fast path without locking.
if (num_priority_units_.load(std::memory_order_relaxed) == 0) {
return {};
}
- TopTierPriorityUnitsQueue* queue =
- &top_tier_priority_units_queues_[task_id];
-
int steal_task_id;
{
base::MutexGuard mutex_guard(&queue->mutex);
- while (!queue->units.empty()) {
- auto unit = queue->units.top().unit;
- queue->units.pop();
+ while (!queue->top_tier_priority_units.empty()) {
+ auto unit = queue->top_tier_priority_units.top().unit;
+ queue->top_tier_priority_units.pop();
num_priority_units_.fetch_sub(1, std::memory_order_relaxed);
- if (!treated_[unit.func_index()].exchange(true,
- std::memory_order_relaxed)) {
+ if (!top_tier_compiled_[unit.func_index()].exchange(
+ true, std::memory_order_relaxed)) {
return unit;
}
num_units_[kTopTier].fetch_sub(1, std::memory_order_relaxed);
@@ -467,28 +425,34 @@ class CompilationUnitQueues {
// Try to steal from all other queues. If this succeeds, return one of the
// stolen units.
- size_t steal_trials = queues_.size();
- for (; steal_trials > 0;
- --steal_trials, steal_task_id = next_task_id(steal_task_id)) {
- if (steal_task_id == task_id) continue;
- if (auto unit = StealTopTierPriorityUnit(task_id, steal_task_id)) {
- return unit;
+ {
+ base::SharedMutexGuard<base::kShared> guard(&queues_mutex_);
+ for (size_t steal_trials = 0; steal_trials < queues_.size();
+ ++steal_trials, ++steal_task_id) {
+ if (steal_task_id >= static_cast<int>(queues_.size())) {
+ steal_task_id = 0;
+ }
+ if (auto unit = StealTopTierPriorityUnit(queue, steal_task_id)) {
+ return unit;
+ }
}
}
return {};
}
- // Steal units of {wanted_tier} from {steal_from_task_id} to {task_id}. Return
+ // Steal units of {wanted_tier} from {steal_from_task_id} to {queue}. Return
  // first stolen unit (rest put into {queue}), or {nullopt} if
// {steal_from_task_id} had no units of {wanted_tier}.
+ // Hold a shared lock on {queues_mutex_} when calling this method.
base::Optional<WasmCompilationUnit> StealUnitsAndGetFirst(
- int task_id, int steal_from_task_id, int wanted_tier) {
- DCHECK_NE(task_id, steal_from_task_id);
+ QueueImpl* queue, int steal_from_task_id, int wanted_tier) {
+ auto* steal_queue = queues_[steal_from_task_id].get();
+ // Cannot steal from own queue.
+ if (steal_queue == queue) return {};
std::vector<WasmCompilationUnit> stolen;
base::Optional<WasmCompilationUnit> returned_unit;
{
- Queue* steal_queue = &queues_[steal_from_task_id];
base::MutexGuard guard(&steal_queue->mutex);
auto* steal_from_vector = &steal_queue->units[wanted_tier];
if (steal_from_vector->empty()) return {};
@@ -498,81 +462,65 @@ class CompilationUnitQueues {
stolen.assign(steal_begin + 1, steal_from_vector->end());
steal_from_vector->erase(steal_begin, steal_from_vector->end());
}
- Queue* queue = &queues_[task_id];
base::MutexGuard guard(&queue->mutex);
auto* target_queue = &queue->units[wanted_tier];
target_queue->insert(target_queue->end(), stolen.begin(), stolen.end());
- queue->next_steal_task_id = next_task_id(steal_from_task_id);
+ queue->next_steal_task_id = steal_from_task_id + 1;
return returned_unit;
}
  // Steal one priority unit from {steal_from_task_id} to {queue}. Return
// stolen unit, or {nullopt} if {steal_from_task_id} had no priority units.
+ // Hold a shared lock on {queues_mutex_} when calling this method.
base::Optional<WasmCompilationUnit> StealTopTierPriorityUnit(
- int task_id, int steal_from_task_id) {
- DCHECK_NE(task_id, steal_from_task_id);
-
+ QueueImpl* queue, int steal_from_task_id) {
+ auto* steal_queue = queues_[steal_from_task_id].get();
+ // Cannot steal from own queue.
+ if (steal_queue == queue) return {};
base::Optional<WasmCompilationUnit> returned_unit;
{
- TopTierPriorityUnitsQueue* steal_queue =
- &top_tier_priority_units_queues_[steal_from_task_id];
base::MutexGuard guard(&steal_queue->mutex);
while (true) {
- if (steal_queue->units.empty()) return {};
+ if (steal_queue->top_tier_priority_units.empty()) return {};
- auto unit = steal_queue->units.top().unit;
- steal_queue->units.pop();
+ auto unit = steal_queue->top_tier_priority_units.top().unit;
+ steal_queue->top_tier_priority_units.pop();
num_priority_units_.fetch_sub(1, std::memory_order_relaxed);
- if (!treated_[unit.func_index()].exchange(true,
- std::memory_order_relaxed)) {
+ if (!top_tier_compiled_[unit.func_index()].exchange(
+ true, std::memory_order_relaxed)) {
returned_unit = unit;
break;
}
num_units_[kTopTier].fetch_sub(1, std::memory_order_relaxed);
}
}
- TopTierPriorityUnitsQueue* queue =
- &top_tier_priority_units_queues_[task_id];
base::MutexGuard guard(&queue->mutex);
- queue->next_steal_task_id = next_task_id(steal_from_task_id);
+ queue->next_steal_task_id = steal_from_task_id + 1;
return returned_unit;
}
-};
-
-// {JobHandle} is not thread safe in general (at least both the
-// {DefaultJobHandle} and chromium's {base::JobHandle} are not). Hence, protect
-// concurrent accesses via a mutex.
-class ThreadSafeJobHandle {
- public:
- explicit ThreadSafeJobHandle(std::shared_ptr<JobHandle> job_handle)
- : job_handle_(std::move(job_handle)) {}
- void NotifyConcurrencyIncrease() {
- base::MutexGuard guard(&mutex_);
- job_handle_->NotifyConcurrencyIncrease();
- }
+  // {queues_mutex_} protects {queues_}.
+ base::SharedMutex queues_mutex_;
+ std::vector<std::unique_ptr<QueueImpl>> queues_;
- void Join() {
- base::MutexGuard guard(&mutex_);
- job_handle_->Join();
- }
+ const int num_declared_functions_;
- void Cancel() {
- base::MutexGuard guard(&mutex_);
- job_handle_->Cancel();
- }
-
- bool IsRunning() const {
- base::MutexGuard guard(&mutex_);
- return job_handle_->IsRunning();
- }
+ BigUnitsQueue big_units_queue_;
- private:
- mutable base::Mutex mutex_;
- std::shared_ptr<JobHandle> job_handle_;
+ std::atomic<size_t> num_units_[kNumTiers];
+ std::atomic<size_t> num_priority_units_{0};
+ std::unique_ptr<std::atomic<bool>[]> top_tier_compiled_;
+ std::atomic<int> next_queue_to_add{0};
};
+bool CompilationUnitQueues::Queue::ShouldPublish(
+ int num_processed_units) const {
+ auto* queue = static_cast<const QueueImpl*>(this);
+ return num_processed_units >=
+ queue->publish_limit.load(std::memory_order_relaxed);
+}
+
// The {CompilationStateImpl} keeps track of the compilation state of the
// owning NativeModule, i.e. which functions are left to be compiled.
// It contains a task manager to allow parallel and asynchronous background
@@ -586,6 +534,7 @@ class CompilationStateImpl {
// Cancel all background compilation, without waiting for compile tasks to
// finish.
void CancelCompilation();
+ bool cancelled() const;
// Initialize compilation progress. Set compilation tiers to expect for
// baseline and top tier compilation. Must be set before {AddCompilationUnits}
@@ -618,8 +567,11 @@ class CompilationStateImpl {
js_to_wasm_wrapper_units);
void AddTopTierCompilationUnit(WasmCompilationUnit);
void AddTopTierPriorityCompilationUnit(WasmCompilationUnit, size_t);
+
+ CompilationUnitQueues::Queue* GetQueueForCompileTask(int task_id);
+
base::Optional<WasmCompilationUnit> GetNextCompilationUnit(
- int task_id, CompileBaselineOnly baseline_only);
+ CompilationUnitQueues::Queue*, CompileBaselineOnly);
std::shared_ptr<JSToWasmWrapperCompilationUnit>
GetNextJSToWasmWrapperCompilationUnit();
@@ -629,13 +581,13 @@ class CompilationStateImpl {
void OnFinishedUnits(Vector<WasmCode*>);
void OnFinishedJSToWasmWrapperUnits(int num);
- int GetFreeCompileTaskId();
- int GetUnpublishedUnitsLimits(int task_id);
- void OnCompilationStopped(int task_id, const WasmFeatures& detected);
+ void OnCompilationStopped(const WasmFeatures& detected);
void PublishDetectedFeatures(Isolate*);
+ void SchedulePublishCompilationResults(
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code);
// Ensure that a compilation job is running, and increase its concurrency if
// needed.
- void ScheduleCompileJobForNewUnits(int new_units);
+ void ScheduleCompileJobForNewUnits();
size_t NumOutstandingCompilations() const;
@@ -687,8 +639,12 @@ class CompilationStateImpl {
// Hold the {callbacks_mutex_} when calling this method.
void TriggerCallbacks(base::EnumSet<CompilationEvent> additional_events = {});
+ void PublishCompilationResults(
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code);
+ void PublishCode(Vector<std::unique_ptr<WasmCode>> codes);
+
NativeModule* const native_module_;
- const std::shared_ptr<BackgroundCompileToken> background_compile_token_;
+ std::weak_ptr<NativeModule> const native_module_weak_;
const CompileMode compile_mode_;
const std::shared_ptr<Counters> async_counters_;
@@ -696,20 +652,9 @@ class CompilationStateImpl {
// using relaxed semantics.
std::atomic<bool> compile_failed_{false};
- // The atomic counter is shared with the compilation job. It's increased if
- // more units are added, and decreased when the queue drops to zero. Hence
- // it's an approximation of the current number of available units in the
- // queue, but it's not updated after popping a single unit, because that
- // would create too much contention.
- // This counter is not used for synchronization, hence relaxed memory ordering
- // can be used. The thread that increases the counter is the same that calls
- // {NotifyConcurrencyIncrease} later. The only reduction of the counter is a
- // drop to zero after a worker does not find any unit in the queue, and after
- // that drop another check is executed to ensure that any left-over units are
- // still processed.
- std::shared_ptr<std::atomic<int>> scheduled_units_approximation_ =
- std::make_shared<std::atomic<int>>(0);
- const int max_compile_concurrency_ = 0;
+ // True if compilation was cancelled and worker threads should return. This
+ // flag can be updated and read using relaxed semantics.
+ std::atomic<bool> compile_cancelled_{false};
CompilationUnitQueues compilation_unit_queues_;
@@ -729,7 +674,7 @@ class CompilationStateImpl {
//////////////////////////////////////////////////////////////////////////////
// Protected by {mutex_}:
- std::shared_ptr<ThreadSafeJobHandle> current_compile_job_;
+ std::shared_ptr<JobHandle> current_compile_job_;
// Features detected to be used in this module. Features can be detected
// as a module is being compiled.
@@ -768,6 +713,11 @@ class CompilationStateImpl {
// End of fields protected by {callbacks_mutex_}.
//////////////////////////////////////////////////////////////////////////////
+ // {publish_mutex_} protects {publish_queue_} and {publisher_running_}.
+ base::Mutex publish_mutex_;
+ std::vector<std::unique_ptr<WasmCode>> publish_queue_;
+ bool publisher_running_ = false;
+
// Encoding of fields in the {compilation_progress_} vector.
using RequiredBaselineTierField = base::BitField8<ExecutionTier, 0, 2>;
using RequiredTopTierField = base::BitField8<ExecutionTier, 2, 2>;
@@ -782,21 +732,14 @@ const CompilationStateImpl* Impl(const CompilationState* compilation_state) {
return reinterpret_cast<const CompilationStateImpl*>(compilation_state);
}
-CompilationStateImpl* BackgroundCompileScope::compilation_state() {
- return Impl(native_module()->compilation_state());
+CompilationStateImpl* BackgroundCompileScope::compilation_state() const {
+ DCHECK(native_module_);
+ return Impl(native_module_->compilation_state());
}
-void BackgroundCompileToken::PublishCode(
- NativeModule* native_module, Vector<std::unique_ptr<WasmCode>> code) {
- WasmCodeRefScope code_ref_scope;
- std::vector<WasmCode*> published_code = native_module->PublishCode(code);
- // Defer logging code in case wire bytes were not fully received yet.
- if (native_module->HasWireBytes()) {
- native_module->engine()->LogCode(VectorOf(published_code));
- }
-
- Impl(native_module->compilation_state())
- ->OnFinishedUnits(VectorOf(published_code));
+bool BackgroundCompileScope::cancelled() const {
+ return native_module_ == nullptr ||
+ Impl(native_module_->compilation_state())->cancelled();
}
void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
@@ -877,8 +820,9 @@ bool CompilationState::recompilation_finished() const {
std::unique_ptr<CompilationState> CompilationState::New(
const std::shared_ptr<NativeModule>& native_module,
std::shared_ptr<Counters> async_counters) {
- return std::unique_ptr<CompilationState>(reinterpret_cast<CompilationState*>(
- new CompilationStateImpl(native_module, std::move(async_counters))));
+ return std::unique_ptr<CompilationState>(
+ reinterpret_cast<CompilationState*>(new CompilationStateImpl(
+ std::move(native_module), std::move(async_counters))));
}
// End of PIMPL implementation of {CompilationState}.
@@ -1215,31 +1159,31 @@ void TriggerTierUp(Isolate* isolate, NativeModule* native_module,
namespace {
void RecordStats(const Code code, Counters* counters) {
- counters->wasm_generated_code_size()->Increment(code.body_size());
+ counters->wasm_generated_code_size()->Increment(code.raw_body_size());
counters->wasm_reloc_size()->Increment(code.relocation_info().length());
}
enum CompilationExecutionResult : int8_t { kNoMoreUnits, kYield };
CompilationExecutionResult ExecuteJSToWasmWrapperCompilationUnits(
- const std::shared_ptr<BackgroundCompileToken>& token,
- JobDelegate* delegate) {
+ std::weak_ptr<NativeModule> native_module, JobDelegate* delegate) {
std::shared_ptr<JSToWasmWrapperCompilationUnit> wrapper_unit = nullptr;
int num_processed_wrappers = 0;
{
- BackgroundCompileScope compile_scope(token);
+ BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kNoMoreUnits;
wrapper_unit = compile_scope.compilation_state()
->GetNextJSToWasmWrapperCompilationUnit();
if (!wrapper_unit) return kNoMoreUnits;
}
+ TRACE_EVENT0("v8.wasm", "wasm.JSToWasmWrapperCompilation");
while (true) {
wrapper_unit->Execute();
++num_processed_wrappers;
bool yield = delegate && delegate->ShouldYield();
- BackgroundCompileScope compile_scope(token);
+ BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kNoMoreUnits;
if (yield ||
!(wrapper_unit = compile_scope.compilation_state()
@@ -1251,16 +1195,35 @@ CompilationExecutionResult ExecuteJSToWasmWrapperCompilationUnits(
}
}
+namespace {
+const char* GetCompilationEventName(const WasmCompilationUnit& unit,
+ const CompilationEnv& env) {
+ ExecutionTier tier = unit.tier();
+ if (tier == ExecutionTier::kLiftoff) {
+ return "wasm.BaselineCompilation";
+ }
+ if (tier == ExecutionTier::kTurbofan) {
+ return "wasm.TopTierCompilation";
+ }
+ if (unit.func_index() <
+ static_cast<int>(env.module->num_imported_functions)) {
+ return "wasm.WasmToJSWrapperCompilation";
+ }
+ return "wasm.OtherCompilation";
+}
+} // namespace
+
// Run by the {BackgroundCompileJob} (on any thread).
CompilationExecutionResult ExecuteCompilationUnits(
- const std::shared_ptr<BackgroundCompileToken>& token, Counters* counters,
+ std::weak_ptr<NativeModule> native_module, Counters* counters,
JobDelegate* delegate, CompileBaselineOnly baseline_only) {
TRACE_EVENT0("v8.wasm", "wasm.ExecuteCompilationUnits");
// Execute JS to Wasm wrapper units first, so that they are ready to be
// finalized by the main thread when the kFinishedBaselineCompilation event is
// triggered.
- if (ExecuteJSToWasmWrapperCompilationUnits(token, delegate) == kYield) {
+ if (ExecuteJSToWasmWrapperCompilationUnits(native_module, delegate) ==
+ kYield) {
return kYield;
}
@@ -1270,108 +1233,65 @@ CompilationExecutionResult ExecuteCompilationUnits(
std::shared_ptr<WireBytesStorage> wire_bytes;
std::shared_ptr<const WasmModule> module;
WasmEngine* wasm_engine;
- // The Jobs API guarantees that {GetTaskId} is less than the number of
- // workers, and that the number of workers is less than or equal to the max
- // compile concurrency, which makes the task_id safe to use as an index into
- // the worker queues.
- int task_id = delegate ? delegate->GetTaskId() : 0;
- int unpublished_units_limit;
+ // Task 0 is any main thread (there might be multiple from multiple isolates),
+ // worker threads start at 1 (thus the "+ 1").
+ int task_id = delegate ? (int{delegate->GetTaskId()} + 1) : 0;
+ DCHECK_LE(0, task_id);
+ CompilationUnitQueues::Queue* queue;
base::Optional<WasmCompilationUnit> unit;
WasmFeatures detected_features = WasmFeatures::None();
- auto stop = [&detected_features,
- task_id](BackgroundCompileScope& compile_scope) {
- compile_scope.compilation_state()->OnCompilationStopped(task_id,
- detected_features);
- };
-
// Preparation (synchronized): Initialize the fields above and get the first
// compilation unit.
{
- BackgroundCompileScope compile_scope(token);
+ BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kNoMoreUnits;
auto* compilation_state = compile_scope.compilation_state();
env.emplace(compile_scope.native_module()->CreateCompilationEnv());
wire_bytes = compilation_state->GetWireBytesStorage();
module = compile_scope.native_module()->shared_module();
wasm_engine = compile_scope.native_module()->engine();
- unpublished_units_limit =
- compilation_state->GetUnpublishedUnitsLimits(task_id);
- unit = compilation_state->GetNextCompilationUnit(task_id, baseline_only);
- if (!unit) {
- stop(compile_scope);
- return kNoMoreUnits;
- }
+ queue = compilation_state->GetQueueForCompileTask(task_id);
+ unit = compilation_state->GetNextCompilationUnit(queue, baseline_only);
+ if (!unit) return kNoMoreUnits;
}
TRACE_COMPILE("ExecuteCompilationUnits (task id %d)\n", task_id);
std::vector<WasmCompilationResult> results_to_publish;
-
- auto publish_results = [&results_to_publish](
- BackgroundCompileScope* compile_scope) {
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
- "wasm.PublishCompilationResults", "num_results",
- results_to_publish.size());
- if (results_to_publish.empty()) return;
- std::vector<std::unique_ptr<WasmCode>> unpublished_code =
- compile_scope->native_module()->AddCompiledCode(
- VectorOf(results_to_publish));
- results_to_publish.clear();
-
- // For import wrapper compilation units, add result to the cache.
- const NativeModule* native_module = compile_scope->native_module();
- int num_imported_functions = native_module->num_imported_functions();
- WasmImportWrapperCache* cache = native_module->import_wrapper_cache();
- for (const auto& code : unpublished_code) {
- int func_index = code->index();
- DCHECK_LE(0, func_index);
- DCHECK_LT(func_index, native_module->num_functions());
- if (func_index < num_imported_functions) {
- const FunctionSig* sig =
- native_module->module()->functions[func_index].sig;
- WasmImportWrapperCache::CacheKey key(
- compiler::kDefaultImportCallKind, sig,
- static_cast<int>(sig->parameter_count()));
- // If two imported functions have the same key, only one of them should
- // have been added as a compilation unit. So it is always the first time
- // we compile a wrapper for this key here.
- DCHECK_NULL((*cache)[key]);
- (*cache)[key] = code.get();
- code->IncRef();
- }
- }
-
- compile_scope->SchedulePublishCode(std::move(unpublished_code));
- };
-
- bool compilation_failed = false;
while (true) {
- // (asynchronous): Execute the compilation.
- WasmCompilationResult result = unit->ExecuteCompilation(
- wasm_engine, &env.value(), wire_bytes, counters, &detected_features);
- results_to_publish.emplace_back(std::move(result));
-
- bool yield = delegate && delegate->ShouldYield();
-
- // (synchronized): Publish the compilation result and get the next unit.
- {
- BackgroundCompileScope compile_scope(token);
+ ExecutionTier current_tier = unit->tier();
+ const char* event_name = GetCompilationEventName(unit.value(), env.value());
+ TRACE_EVENT0("v8.wasm", event_name);
+ while (unit->tier() == current_tier) {
+ // (asynchronous): Execute the compilation.
+ WasmCompilationResult result = unit->ExecuteCompilation(
+ wasm_engine, &env.value(), wire_bytes, counters, &detected_features);
+ results_to_publish.emplace_back(std::move(result));
+
+ bool yield = delegate && delegate->ShouldYield();
+
+ // (synchronized): Publish the compilation result and get the next unit.
+ BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kNoMoreUnits;
+
if (!results_to_publish.back().succeeded()) {
- // Compile error.
compile_scope.compilation_state()->SetError();
- stop(compile_scope);
- compilation_failed = true;
- break;
+ return kNoMoreUnits;
}
- // Get next unit.
+ // Yield or get next unit.
if (yield ||
!(unit = compile_scope.compilation_state()->GetNextCompilationUnit(
- task_id, baseline_only))) {
- publish_results(&compile_scope);
- stop(compile_scope);
+ queue, baseline_only))) {
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code =
+ compile_scope.native_module()->AddCompiledCode(
+ VectorOf(std::move(results_to_publish)));
+ results_to_publish.clear();
+ compile_scope.compilation_state()->SchedulePublishCompilationResults(
+ std::move(unpublished_code));
+ compile_scope.compilation_state()->OnCompilationStopped(
+ detected_features);
return yield ? kYield : kNoMoreUnits;
}
@@ -1382,17 +1302,17 @@ CompilationExecutionResult ExecuteCompilationUnits(
// Also publish after finishing a certain amount of units, to avoid
// contention when all threads publish at the end.
if (unit->tier() == ExecutionTier::kTurbofan ||
- static_cast<int>(results_to_publish.size()) >=
- unpublished_units_limit) {
- publish_results(&compile_scope);
+ queue->ShouldPublish(static_cast<int>(results_to_publish.size()))) {
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code =
+ compile_scope.native_module()->AddCompiledCode(
+ VectorOf(std::move(results_to_publish)));
+ results_to_publish.clear();
+ compile_scope.compilation_state()->SchedulePublishCompilationResults(
+ std::move(unpublished_code));
}
}
}
- // We only get here if compilation failed. Other exits return directly.
- DCHECK(compilation_failed);
- USE(compilation_failed);
- token->Cancel();
- return kNoMoreUnits;
+ UNREACHABLE();
}
using JSToWasmWrapperKey = std::pair<bool, FunctionSig>;
@@ -1410,7 +1330,8 @@ int AddExportWrapperUnits(Isolate* isolate, WasmEngine* wasm_engine,
if (keys.insert(key).second) {
auto unit = std::make_shared<JSToWasmWrapperCompilationUnit>(
isolate, wasm_engine, function.sig, native_module->module(),
- function.imported, enabled_features);
+ function.imported, enabled_features,
+ JSToWasmWrapperCompilationUnit::kAllowGeneric);
builder->AddJSToWasmWrapperUnit(std::move(unit));
}
}
@@ -1529,6 +1450,7 @@ class CompilationTimeCallback {
histogram->AddSample(static_cast<int>(duration.InMicroseconds()));
}
+ // TODO(sartang@microsoft.com): Remove wall_clock_time_in_us field
v8::metrics::WasmModuleCompiled event{
(compile_mode_ != kSynchronous), // async
(compile_mode_ == kStreaming), // streamed
@@ -1538,7 +1460,8 @@ class CompilationTimeCallback {
true, // success
native_module->liftoff_code_size(), // code_size_in_bytes
native_module->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds() // wall_clock_time_in_us
+ duration.InMicroseconds(), // wall_clock_time_in_us
+ duration.InMicroseconds() // wall_clock_duration_in_us
};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
@@ -1549,7 +1472,8 @@ class CompilationTimeCallback {
v8::metrics::WasmModuleTieredUp event{
FLAG_wasm_lazy_compilation, // lazy
native_module->turbofan_code_size(), // code_size_in_bytes
- duration.InMicroseconds() // wall_clock_time_in_us
+ duration.InMicroseconds(), // wall_clock_time_in_us
+ duration.InMicroseconds() // wall_clock_duration_in_us
};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
@@ -1563,7 +1487,8 @@ class CompilationTimeCallback {
false, // success
native_module->liftoff_code_size(), // code_size_in_bytes
native_module->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds() // wall_clock_time_in_us
+ duration.InMicroseconds(), // wall_clock_time_in_us
+ duration.InMicroseconds() // wall_clock_duration_in_us
};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
@@ -1646,55 +1571,33 @@ void CompileNativeModule(Isolate* isolate,
}
}
-// The runnable task that performs compilations in the background.
-class BackgroundCompileJob : public JobTask {
+class BackgroundCompileJob final : public JobTask {
public:
- explicit BackgroundCompileJob(
- std::shared_ptr<BackgroundCompileToken> token,
- std::shared_ptr<Counters> async_counters,
- std::shared_ptr<std::atomic<int>> scheduled_units_approximation,
- size_t max_concurrency)
- : token_(std::move(token)),
- async_counters_(std::move(async_counters)),
- scheduled_units_approximation_(
- std::move(scheduled_units_approximation)),
- max_concurrency_(max_concurrency) {}
+ explicit BackgroundCompileJob(std::weak_ptr<NativeModule> native_module,
+ std::shared_ptr<Counters> async_counters)
+ : native_module_(std::move(native_module)),
+ async_counters_(std::move(async_counters)) {}
void Run(JobDelegate* delegate) override {
- if (ExecuteCompilationUnits(token_, async_counters_.get(), delegate,
- kBaselineOrTopTier) == kYield) {
- return;
- }
- // Otherwise we didn't find any more units to execute. Reduce the atomic
- // counter of the approximated number of available units to zero, but then
- // check whether any more units were added in the meantime, and increase
- // back if necessary.
- scheduled_units_approximation_->store(0, std::memory_order_relaxed);
-
- BackgroundCompileScope scope(token_);
- if (scope.cancelled()) return;
- size_t outstanding_units =
- scope.compilation_state()->NumOutstandingCompilations();
- if (outstanding_units == 0) return;
- // On a race between this thread and the thread which scheduled the units,
- // this might increase concurrency more than needed, which is fine. It
- // will be reduced again when the first task finds no more work to do.
- scope.compilation_state()->ScheduleCompileJobForNewUnits(
- static_cast<int>(outstanding_units));
+ ExecuteCompilationUnits(native_module_, async_counters_.get(), delegate,
+ kBaselineOrTopTier);
}
size_t GetMaxConcurrency(size_t worker_count) const override {
- // {current_concurrency_} does not reflect the units that running workers
- // are processing, thus add the current worker count to that number.
- return std::min(max_concurrency_,
- worker_count + scheduled_units_approximation_->load());
+ BackgroundCompileScope scope(native_module_);
+ if (scope.cancelled()) return 0;
+ // NumOutstandingCompilations() does not reflect the units that running
+ // workers are processing, thus add the current worker count to that number.
+ size_t flag_limit =
+ static_cast<size_t>(std::max(1, FLAG_wasm_num_compilation_tasks));
+ return std::min(
+ flag_limit,
+ worker_count + scope.compilation_state()->NumOutstandingCompilations());
}
private:
- const std::shared_ptr<BackgroundCompileToken> token_;
+ const std::weak_ptr<NativeModule> native_module_;
const std::shared_ptr<Counters> async_counters_;
- const std::shared_ptr<std::atomic<int>> scheduled_units_approximation_;
- const size_t max_concurrency_;
};
} // namespace
@@ -1974,7 +1877,8 @@ void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
!compilation_state->failed(), // success
native_module_->liftoff_code_size(), // code_size_in_bytes
native_module_->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds() // wall_clock_time_in_us
+ duration.InMicroseconds(), // wall_clock_time_in_us
+ duration.InMicroseconds() // wall_clock_duration_in_us
};
isolate_->metrics_recorder()->DelayMainThreadEvent(event, context_id_);
}
@@ -2489,6 +2393,7 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(
job_->metrics_event_.module_size_in_bytes = job_->wire_bytes_.length();
job_->metrics_event_.function_count = num_functions_;
job_->metrics_event_.wall_clock_time_in_us = duration.InMicroseconds();
+ job_->metrics_event_.wall_clock_duration_in_us = duration.InMicroseconds();
job_->isolate_->metrics_recorder()->DelayMainThreadEvent(job_->metrics_event_,
job_->context_id_);
@@ -2580,6 +2485,8 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
return false;
}
+ decoder_.set_code_section(offset, static_cast<uint32_t>(code_section_length));
+
prefix_hash_ = base::hash_combine(prefix_hash_,
static_cast<uint32_t>(code_section_length));
if (!wasm_engine_->GetStreamingCompilationOwnership(prefix_hash_)) {
@@ -2601,7 +2508,6 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
job_->DoImmediately<AsyncCompileJob::PrepareAndStartCompile>(
decoder_.shared_module(), false, code_size_estimate);
- decoder_.set_code_section(offset, static_cast<uint32_t>(code_section_length));
auto* compilation_state = Impl(job_->native_module_->compilation_state());
compilation_state->SetWireBytesStorage(std::move(wire_bytes_storage));
DCHECK_EQ(job_->native_module_->module()->origin, kWasmOrigin);
@@ -2710,6 +2616,7 @@ void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
job_->metrics_event_.module_size_in_bytes = job_->wire_bytes_.length();
job_->metrics_event_.function_count = num_functions_;
job_->metrics_event_.wall_clock_time_in_us = duration.InMicroseconds();
+ job_->metrics_event_.wall_clock_duration_in_us = duration.InMicroseconds();
job_->isolate_->metrics_recorder()->DelayMainThreadEvent(job_->metrics_event_,
job_->context_id_);
@@ -2804,37 +2711,31 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
return true;
}
-// TODO(wasm): Try to avoid the {NumberOfWorkerThreads} calls, grow queues
-// dynamically instead.
-int GetMaxCompileConcurrency() {
- int num_worker_threads = V8::GetCurrentPlatform()->NumberOfWorkerThreads();
- return std::min(FLAG_wasm_num_compilation_tasks, num_worker_threads);
-}
-
CompilationStateImpl::CompilationStateImpl(
const std::shared_ptr<NativeModule>& native_module,
std::shared_ptr<Counters> async_counters)
: native_module_(native_module.get()),
- background_compile_token_(
- std::make_shared<BackgroundCompileToken>(native_module)),
+ native_module_weak_(std::move(native_module)),
compile_mode_(FLAG_wasm_tier_up &&
native_module->module()->origin == kWasmOrigin
? CompileMode::kTiering
: CompileMode::kRegular),
async_counters_(std::move(async_counters)),
- max_compile_concurrency_(std::max(GetMaxCompileConcurrency(), 1)),
- // Add one to the allowed number of parallel tasks, because the foreground
- // task sometimes also contributes.
- compilation_unit_queues_(max_compile_concurrency_ + 1,
- native_module->num_functions()) {}
+ compilation_unit_queues_(native_module->num_functions()) {}
void CompilationStateImpl::CancelCompilation() {
- background_compile_token_->Cancel();
// No more callbacks after abort.
base::MutexGuard callbacks_guard(&callbacks_mutex_);
+ // std::memory_order_relaxed is sufficient because no other state is
+ // synchronized with |compile_cancelled_|.
+ compile_cancelled_.store(true, std::memory_order_relaxed);
callbacks_.clear();
}
+bool CompilationStateImpl::cancelled() const {
+ return compile_cancelled_.load(std::memory_order_relaxed);
+}
+
void CompilationStateImpl::InitializeCompilationProgress(
bool lazy_module, int num_import_wrappers, int num_export_wrappers) {
DCHECK(!failed());
@@ -2909,6 +2810,9 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization() {
RequiredBaselineTierField::encode(ExecutionTier::kTurbofan) |
RequiredTopTierField::encode(ExecutionTier::kTurbofan) |
ReachedTierField::encode(ExecutionTier::kTurbofan);
+ finished_events_.Add(CompilationEvent::kFinishedExportWrappers);
+ finished_events_.Add(CompilationEvent::kFinishedBaselineCompilation);
+ finished_events_.Add(CompilationEvent::kFinishedTopTierCompilation);
compilation_progress_.assign(module->num_declared_functions,
kProgressAfterDeserialization);
}
@@ -2956,7 +2860,9 @@ void CompilationStateImpl::InitializeRecompilation(
// start yet, and new code will be kept tiered-down from the start. For
// streaming compilation, there is a special path to tier down later, when
// the module is complete. In any case, we don't need to recompile here.
+ base::Optional<CompilationUnitBuilder> builder;
if (compilation_progress_.size() > 0) {
+ builder.emplace(native_module_);
const WasmModule* module = native_module_->module();
DCHECK_EQ(module->num_declared_functions, compilation_progress_.size());
DCHECK_GE(module->num_declared_functions,
@@ -2971,15 +2877,13 @@ void CompilationStateImpl::InitializeRecompilation(
: ExecutionTier::kTurbofan;
int imported = module->num_imported_functions;
// Generate necessary compilation units on the fly.
- CompilationUnitBuilder builder(native_module_);
for (int function_index : recompile_function_indexes) {
DCHECK_LE(imported, function_index);
int slot_index = function_index - imported;
auto& progress = compilation_progress_[slot_index];
progress = MissingRecompilationField::update(progress, true);
- builder.AddRecompilationUnit(function_index, new_tier);
+ builder->AddRecompilationUnit(function_index, new_tier);
}
- builder.Commit();
}
// Trigger callback if module needs no recompilation.
@@ -2987,6 +2891,12 @@ void CompilationStateImpl::InitializeRecompilation(
TriggerCallbacks(base::EnumSet<CompilationEvent>(
{CompilationEvent::kFinishedRecompilation}));
}
+
+ if (builder.has_value()) {
+ // Avoid holding lock while scheduling a compile job.
+ guard.reset();
+ builder->Commit();
+ }
}
void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
@@ -3017,13 +2927,15 @@ void CompilationStateImpl::AddCompilationUnits(
compilation_unit_queues_.AddUnits(baseline_units, top_tier_units,
native_module_->module());
}
- js_to_wasm_wrapper_units_.insert(js_to_wasm_wrapper_units_.end(),
- js_to_wasm_wrapper_units.begin(),
- js_to_wasm_wrapper_units.end());
-
- size_t total_units = baseline_units.size() + top_tier_units.size() +
- js_to_wasm_wrapper_units.size();
- ScheduleCompileJobForNewUnits(static_cast<int>(total_units));
+ if (!js_to_wasm_wrapper_units.empty()) {
+    // |js_to_wasm_wrapper_units_| can only be modified before background
+    // compilation has started.
+ DCHECK(!current_compile_job_ || !current_compile_job_->IsRunning());
+ js_to_wasm_wrapper_units_.insert(js_to_wasm_wrapper_units_.end(),
+ js_to_wasm_wrapper_units.begin(),
+ js_to_wasm_wrapper_units.end());
+ }
+ ScheduleCompileJobForNewUnits();
}
void CompilationStateImpl::AddTopTierCompilationUnit(WasmCompilationUnit unit) {
@@ -3033,7 +2945,7 @@ void CompilationStateImpl::AddTopTierCompilationUnit(WasmCompilationUnit unit) {
void CompilationStateImpl::AddTopTierPriorityCompilationUnit(
WasmCompilationUnit unit, size_t priority) {
compilation_unit_queues_.AddTopTierPriorityUnit(unit, priority);
- ScheduleCompileJobForNewUnits(1);
+ ScheduleCompileJobForNewUnits();
}
std::shared_ptr<JSToWasmWrapperCompilationUnit>
@@ -3055,7 +2967,7 @@ void CompilationStateImpl::FinalizeJSToWasmWrappers(
// optimization we keep the code space unlocked to avoid repeated unlocking
// because many such wrapper are allocated in sequence below.
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
- "wasm.FinalizeJSToWasmWrappers", "num_wrappers",
+ "wasm.FinalizeJSToWasmWrappers", "wrappers",
js_to_wasm_wrapper_units_.size());
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
for (auto& unit : js_to_wasm_wrapper_units_) {
@@ -3067,15 +2979,20 @@ void CompilationStateImpl::FinalizeJSToWasmWrappers(
}
}
+CompilationUnitQueues::Queue* CompilationStateImpl::GetQueueForCompileTask(
+ int task_id) {
+ return compilation_unit_queues_.GetQueueForTask(task_id);
+}
+
base::Optional<WasmCompilationUnit>
CompilationStateImpl::GetNextCompilationUnit(
- int task_id, CompileBaselineOnly baseline_only) {
- return compilation_unit_queues_.GetNextUnit(task_id, baseline_only);
+ CompilationUnitQueues::Queue* queue, CompileBaselineOnly baseline_only) {
+ return compilation_unit_queues_.GetNextUnit(queue, baseline_only);
}
void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
- "wasm.OnFinishedUnits", "num_units", code_vector.size());
+ "wasm.OnFinishedUnits", "units", code_vector.size());
base::MutexGuard guard(&callbacks_mutex_);
@@ -3230,24 +3147,7 @@ void CompilationStateImpl::TriggerCallbacks(
}
}
-int CompilationStateImpl::GetUnpublishedUnitsLimits(int task_id) {
- // We want background threads to publish regularly (to avoid contention when
- // they are all publishing at the end). On the other side, each publishing has
- // some overhead (part of it for synchronizing between threads), so it should
- // not happen *too* often.
- // Thus aim for 4-8 publishes per thread, but distribute it such that
- // publishing is likely to happen at different times.
- int units_per_thread =
- static_cast<int>(native_module_->module()->num_declared_functions /
- max_compile_concurrency_);
- int min = units_per_thread / 8;
- // Return something between {min} and {2*min}, but not smaller than {10}.
- return std::max(10, min + (min * task_id / max_compile_concurrency_));
-}
-
-void CompilationStateImpl::OnCompilationStopped(int task_id,
- const WasmFeatures& detected) {
- DCHECK_GE(max_compile_concurrency_, task_id);
+void CompilationStateImpl::OnCompilationStopped(const WasmFeatures& detected) {
base::MutexGuard guard(&mutex_);
detected_features_.Add(detected);
}
@@ -3260,40 +3160,104 @@ void CompilationStateImpl::PublishDetectedFeatures(Isolate* isolate) {
UpdateFeatureUseCounts(isolate, detected_features_);
}
-void CompilationStateImpl::ScheduleCompileJobForNewUnits(int new_units) {
- // Increase the {scheduled_units_approximation_} counter and remember the old
- // value to check whether it increased towards {max_compile_concurrency_}.
- // In that case, we need to notify the compile job about the increased
- // concurrency.
- DCHECK_LT(0, new_units);
- int old_units = scheduled_units_approximation_->fetch_add(
- new_units, std::memory_order_relaxed);
- bool concurrency_increased = old_units < max_compile_concurrency_;
+void CompilationStateImpl::PublishCompilationResults(
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code) {
+ if (unpublished_code.empty()) return;
- base::MutexGuard guard(&mutex_);
- if (current_compile_job_ && current_compile_job_->IsRunning()) {
- if (concurrency_increased) {
- current_compile_job_->NotifyConcurrencyIncrease();
+ // For import wrapper compilation units, add result to the cache.
+ int num_imported_functions = native_module_->num_imported_functions();
+ WasmImportWrapperCache* cache = native_module_->import_wrapper_cache();
+ for (const auto& code : unpublished_code) {
+ int func_index = code->index();
+ DCHECK_LE(0, func_index);
+ DCHECK_LT(func_index, native_module_->num_functions());
+ if (func_index < num_imported_functions) {
+ const FunctionSig* sig =
+ native_module_->module()->functions[func_index].sig;
+ WasmImportWrapperCache::CacheKey key(
+ compiler::kDefaultImportCallKind, sig,
+ static_cast<int>(sig->parameter_count()));
+ // If two imported functions have the same key, only one of them should
+ // have been added as a compilation unit. So it is always the first time
+ // we compile a wrapper for this key here.
+ DCHECK_NULL((*cache)[key]);
+ (*cache)[key] = code.get();
+ code->IncRef();
}
- return;
}
+ PublishCode(VectorOf(unpublished_code));
+}
+
+void CompilationStateImpl::PublishCode(Vector<std::unique_ptr<WasmCode>> code) {
+ WasmCodeRefScope code_ref_scope;
+ std::vector<WasmCode*> published_code =
+ native_module_->PublishCode(std::move(code));
+ // Defer logging code in case wire bytes were not fully received yet.
+ if (native_module_->HasWireBytes()) {
+ native_module_->engine()->LogCode(VectorOf(published_code));
+ }
+
+ OnFinishedUnits(VectorOf(std::move(published_code)));
+}
+
+void CompilationStateImpl::SchedulePublishCompilationResults(
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code) {
+ {
+ base::MutexGuard guard(&publish_mutex_);
+ if (publisher_running_) {
+ // Add new code to the queue and return.
+ publish_queue_.reserve(publish_queue_.size() + unpublished_code.size());
+ for (auto& c : unpublished_code) {
+ publish_queue_.emplace_back(std::move(c));
+ }
+ return;
+ }
+ publisher_running_ = true;
+ }
+ while (true) {
+ PublishCompilationResults(std::move(unpublished_code));
+ unpublished_code.clear();
+
+ // Keep publishing new code that came in.
+ base::MutexGuard guard(&publish_mutex_);
+ DCHECK(publisher_running_);
+ if (publish_queue_.empty()) {
+ publisher_running_ = false;
+ return;
+ }
+ unpublished_code.swap(publish_queue_);
+ }
+}
+
+void CompilationStateImpl::ScheduleCompileJobForNewUnits() {
if (failed()) return;
- std::unique_ptr<JobTask> new_compile_job =
- std::make_unique<BackgroundCompileJob>(
- background_compile_token_, async_counters_,
- scheduled_units_approximation_, max_compile_concurrency_);
- // TODO(wasm): Lower priority for TurboFan-only jobs.
- std::shared_ptr<JobHandle> handle = V8::GetCurrentPlatform()->PostJob(
- has_priority_ ? TaskPriority::kUserBlocking : TaskPriority::kUserVisible,
- std::move(new_compile_job));
- native_module_->engine()->ShepherdCompileJobHandle(handle);
- current_compile_job_ =
- std::make_unique<ThreadSafeJobHandle>(std::move(handle));
+ std::shared_ptr<JobHandle> new_job_handle;
+ {
+ base::MutexGuard guard(&mutex_);
+ if (current_compile_job_ && current_compile_job_->IsValid()) {
+ current_compile_job_->NotifyConcurrencyIncrease();
+ return;
+ }
+
+ std::unique_ptr<JobTask> new_compile_job =
+ std::make_unique<BackgroundCompileJob>(native_module_weak_,
+ async_counters_);
+ // TODO(wasm): Lower priority for TurboFan-only jobs.
+ new_job_handle = V8::GetCurrentPlatform()->PostJob(
+ has_priority_ ? TaskPriority::kUserBlocking
+ : TaskPriority::kUserVisible,
+ std::move(new_compile_job));
+ current_compile_job_ = new_job_handle;
+ // Reset the priority. Later uses of the compilation state, e.g. for
+ // debugging, should compile with the default priority again.
+ has_priority_ = false;
+ }
- // Reset the priority. Later uses of the compilation state, e.g. for
- // debugging, should compile with the default priority again.
- has_priority_ = false;
+ if (new_job_handle) {
+ native_module_->engine()->ShepherdCompileJobHandle(
+ std::move(new_job_handle));
+ }
}
size_t CompilationStateImpl::NumOutstandingCompilations() const {
@@ -3307,12 +3271,14 @@ size_t CompilationStateImpl::NumOutstandingCompilations() const {
}
void CompilationStateImpl::SetError() {
+ compile_cancelled_.store(true, std::memory_order_relaxed);
if (compile_failed_.exchange(true, std::memory_order_relaxed)) {
return; // Already failed before.
}
base::MutexGuard callbacks_guard(&callbacks_mutex_);
TriggerCallbacks();
+ callbacks_.clear();
}
void CompilationStateImpl::WaitForCompilationEvent(
@@ -3330,7 +3296,7 @@ void CompilationStateImpl::WaitForCompilationEvent(
}
constexpr JobDelegate* kNoDelegate = nullptr;
- ExecuteCompilationUnits(background_compile_token_, async_counters_.get(),
+ ExecuteCompilationUnits(native_module_weak_, async_counters_.get(),
kNoDelegate, kBaselineOnly);
compilation_event_semaphore->Wait();
}
@@ -3350,7 +3316,6 @@ class CompileJSToWasmWrapperJob final : public JobTask {
size_t max_concurrency)
: queue_(queue),
compilation_units_(compilation_units),
- max_concurrency_(max_concurrency),
outstanding_units_(queue->size()) {}
void Run(JobDelegate* delegate) override {
@@ -3366,14 +3331,15 @@ class CompileJSToWasmWrapperJob final : public JobTask {
// {outstanding_units_} includes the units that other workers are currently
// working on, so we can safely ignore the {worker_count} and just return
// the current number of outstanding units.
- return std::min(max_concurrency_,
+ size_t flag_limit =
+ static_cast<size_t>(std::max(1, FLAG_wasm_num_compilation_tasks));
+ return std::min(flag_limit,
outstanding_units_.load(std::memory_order_relaxed));
}
private:
JSToWasmWrapperQueue* const queue_;
JSToWasmWrapperUnitMap* const compilation_units_;
- const size_t max_concurrency_;
std::atomic<size_t> outstanding_units_;
};
} // namespace
@@ -3395,7 +3361,8 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
if (queue.insert(key)) {
auto unit = std::make_unique<JSToWasmWrapperCompilationUnit>(
isolate, isolate->wasm_engine(), function.sig, module,
- function.imported, enabled_features);
+ function.imported, enabled_features,
+ JSToWasmWrapperCompilationUnit::kAllowGeneric);
compilation_units.emplace(key, std::move(unit));
}
}
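
The rewritten SchedulePublishCompilationResults() above hands the publisher role to whichever thread finds {publisher_running_} unset: that thread publishes outside the lock and keeps draining {publish_queue_} until it is empty, while threads that finish later merely append their code and return. The standalone sketch below illustrates only that hand-off pattern; the class and item names are invented for illustration and none of it is V8 code.

// Minimal sketch of the "single publisher drains the queue" hand-off used by
// SchedulePublishCompilationResults() above. Publisher, Schedule and the
// string items are illustrative placeholders, not V8 types.
#include <iostream>
#include <mutex>
#include <string>
#include <thread>
#include <utility>
#include <vector>

class Publisher {
 public:
  // Called by any worker thread with a batch of finished results.
  void Schedule(std::vector<std::string> batch) {
    {
      std::lock_guard<std::mutex> guard(mutex_);
      if (running_) {
        // Another thread is already publishing; just enqueue and return.
        for (auto& item : batch) queue_.emplace_back(std::move(item));
        return;
      }
      running_ = true;  // This thread becomes the publisher.
    }
    while (true) {
      Publish(batch);  // Publish outside the lock.
      batch.clear();
      std::lock_guard<std::mutex> guard(mutex_);
      if (queue_.empty()) {
        running_ = false;  // Nothing left; give up the publisher role.
        return;
      }
      batch.swap(queue_);  // Keep draining what other threads added meanwhile.
    }
  }

 private:
  void Publish(const std::vector<std::string>& batch) {
    for (const auto& item : batch) std::cout << "published " << item << "\n";
  }

  std::mutex mutex_;
  std::vector<std::string> queue_;  // pending batches, like {publish_queue_}
  bool running_ = false;            // like {publisher_running_}
};

int main() {
  Publisher publisher;
  std::thread t1([&] { publisher.Schedule({"f1", "f2"}); });
  std::thread t2([&] { publisher.Schedule({"f3"}); });
  t1.join();
  t2.join();
}

As in the change above, the useful property is that at most one thread is ever publishing, while producers never block on the potentially slow publish step itself.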
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 6206d11986..e688bb9479 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -68,9 +68,6 @@ bool CompileLazy(Isolate*, NativeModule*, int func_index);
void TriggerTierUp(Isolate*, NativeModule*, int func_index);
-// Get the maximum concurrency for parallel compilation.
-int GetMaxCompileConcurrency();
-
template <typename Key, typename Hash>
class WrapperQueue {
public:
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index dea4e1cb69..6d684d3534 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -635,7 +635,8 @@ class ModuleDecoderImpl : public Decoder {
case kExternalMemory: {
// ===== Imported memory =============================================
if (!AddMemory(module_.get())) break;
- uint8_t flags = validate_memory_flags(&module_->has_shared_memory);
+ uint8_t flags = validate_memory_flags(&module_->has_shared_memory,
+ &module_->is_memory64);
consume_resizable_limits("memory", "pages", max_mem_pages(),
&module_->initial_pages,
&module_->has_maximum_pages, max_mem_pages(),
@@ -735,7 +736,8 @@ class ModuleDecoderImpl : public Decoder {
for (uint32_t i = 0; ok() && i < memory_count; i++) {
if (!AddMemory(module_.get())) break;
- uint8_t flags = validate_memory_flags(&module_->has_shared_memory);
+ uint8_t flags = validate_memory_flags(&module_->has_shared_memory,
+ &module_->is_memory64);
consume_resizable_limits("memory", "pages", max_mem_pages(),
&module_->initial_pages,
&module_->has_maximum_pages, max_mem_pages(),
@@ -1531,7 +1533,7 @@ class ModuleDecoderImpl : public Decoder {
return flags;
}
- uint8_t validate_memory_flags(bool* has_shared_memory) {
+ uint8_t validate_memory_flags(bool* has_shared_memory, bool* is_memory64) {
uint8_t flags = consume_u8("memory limits flags");
*has_shared_memory = false;
switch (flags) {
@@ -1542,8 +1544,9 @@ class ModuleDecoderImpl : public Decoder {
case kSharedWithMaximum:
if (!enabled_features_.has_threads()) {
errorf(pc() - 1,
- "invalid memory limits flags (enable via "
- "--experimental-wasm-threads)");
+ "invalid memory limits flags 0x%x (enable via "
+ "--experimental-wasm-threads)",
+ flags);
}
*has_shared_memory = true;
// V8 does not support shared memory without a maximum.
@@ -1557,9 +1560,14 @@ class ModuleDecoderImpl : public Decoder {
case kMemory64WithMaximum:
if (!enabled_features_.has_memory64()) {
errorf(pc() - 1,
- "invalid memory limits flags (enable via "
- "--experimental-wasm-memory64)");
+ "invalid memory limits flags 0x%x (enable via "
+ "--experimental-wasm-memory64)",
+ flags);
}
+ *is_memory64 = true;
+ break;
+ default:
+ errorf(pc() - 1, "invalid memory limits flags 0x%x", flags);
break;
}
return flags;
@@ -1618,7 +1626,8 @@ class ModuleDecoderImpl : public Decoder {
// TODO(manoskouk): This is copy-modified from function-body-decoder-impl.h.
// We should find a way to share this code.
- V8_INLINE bool Validate(const byte* pc, HeapTypeImmediate<kValidate>& imm) {
+ V8_INLINE bool Validate(const byte* pc,
+ HeapTypeImmediate<kFullValidation>& imm) {
if (V8_UNLIKELY(imm.type.is_bottom())) {
error(pc, "invalid heap type");
return false;
@@ -1633,7 +1642,7 @@ class ModuleDecoderImpl : public Decoder {
WasmInitExpr consume_init_expr(WasmModule* module, ValueType expected,
size_t current_global_index) {
- constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
+ constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
WasmOpcode opcode = kExprNop;
std::vector<WasmInitExpr> stack;
while (pc() < end() && opcode != kExprEnd) {
@@ -1670,25 +1679,25 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kExprI32Const: {
- ImmI32Immediate<Decoder::kValidate> imm(this, pc() + 1);
+ ImmI32Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
stack.emplace_back(imm.value);
len = 1 + imm.length;
break;
}
case kExprF32Const: {
- ImmF32Immediate<Decoder::kValidate> imm(this, pc() + 1);
+ ImmF32Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
stack.emplace_back(imm.value);
len = 1 + imm.length;
break;
}
case kExprI64Const: {
- ImmI64Immediate<Decoder::kValidate> imm(this, pc() + 1);
+ ImmI64Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
stack.emplace_back(imm.value);
len = 1 + imm.length;
break;
}
case kExprF64Const: {
- ImmF64Immediate<Decoder::kValidate> imm(this, pc() + 1);
+ ImmF64Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
stack.emplace_back(imm.value);
len = 1 + imm.length;
break;
@@ -1702,8 +1711,8 @@ class ModuleDecoderImpl : public Decoder {
kExprRefNull);
return {};
}
- HeapTypeImmediate<Decoder::kValidate> imm(enabled_features_, this,
- pc() + 1);
+ HeapTypeImmediate<Decoder::kFullValidation> imm(enabled_features_,
+ this, pc() + 1);
len = 1 + imm.length;
if (!Validate(pc() + 1, imm)) return {};
stack.push_back(
@@ -1719,7 +1728,7 @@ class ModuleDecoderImpl : public Decoder {
return {};
}
- FunctionIndexImmediate<Decoder::kValidate> imm(this, pc() + 1);
+ FunctionIndexImmediate<Decoder::kFullValidation> imm(this, pc() + 1);
len = 1 + imm.length;
if (V8_UNLIKELY(module->functions.size() <= imm.index)) {
errorf(pc(), "invalid function index: %u", imm.index);
@@ -1741,8 +1750,8 @@ class ModuleDecoderImpl : public Decoder {
return {};
}
- Simd128Immediate<validate> imm(this, pc() + len + 1);
- len += 1 + kSimd128Size;
+ Simd128Immediate<validate> imm(this, pc() + len);
+ len += kSimd128Size;
stack.emplace_back(imm.value);
break;
}
@@ -1755,8 +1764,8 @@ class ModuleDecoderImpl : public Decoder {
case kExprRttCanon: {
HeapTypeImmediate<validate> imm(enabled_features_, this,
pc() + 2);
- len += 1 + imm.length;
- if (!Validate(pc() + 2, imm)) return {};
+ len += imm.length;
+ if (!Validate(pc() + len, imm)) return {};
stack.push_back(
WasmInitExpr::RttCanon(imm.type.representation()));
break;
@@ -1764,8 +1773,8 @@ class ModuleDecoderImpl : public Decoder {
case kExprRttSub: {
HeapTypeImmediate<validate> imm(enabled_features_, this,
pc() + 2);
- len += 1 + imm.length;
- if (!Validate(pc() + 2, imm)) return {};
+ len += imm.length;
+ if (!Validate(pc() + len, imm)) return {};
if (stack.empty()) {
error(pc(), "calling rtt.sub without arguments");
return {};
@@ -1836,7 +1845,7 @@ class ModuleDecoderImpl : public Decoder {
ValueType consume_value_type() {
uint32_t type_length;
- ValueType result = value_type_reader::read_value_type<kValidate>(
+ ValueType result = value_type_reader::read_value_type<kFullValidation>(
this, this->pc(), &type_length,
origin_ == kWasmOrigin ? enabled_features_ : WasmFeatures::None());
if (result == kWasmBottom) error(pc_, "invalid value type");
@@ -1850,7 +1859,7 @@ class ModuleDecoderImpl : public Decoder {
}
ValueType consume_storage_type() {
- uint8_t opcode = read_u8<kValidate>(this->pc());
+ uint8_t opcode = read_u8<kFullValidation>(this->pc());
switch (opcode) {
case kI8Code:
consume_bytes(1, "i8");
@@ -1961,10 +1970,10 @@ class ModuleDecoderImpl : public Decoder {
ValueType* type, uint32_t* table_index,
WasmInitExpr* offset) {
const byte* pos = pc();
- uint8_t flag;
+ uint32_t flag;
if (enabled_features_.has_bulk_memory() ||
enabled_features_.has_reftypes()) {
- flag = consume_u8("flag");
+ flag = consume_u32v("flag");
} else {
uint32_t table_index = consume_u32v("table index");
// The only valid flag value without bulk_memory or externref is '0'.
@@ -2133,7 +2142,8 @@ class ModuleDecoderImpl : public Decoder {
if (failed()) return index;
switch (opcode) {
case kExprRefNull: {
- HeapTypeImmediate<kValidate> imm(WasmFeatures::All(), this, this->pc());
+ HeapTypeImmediate<kFullValidation> imm(WasmFeatures::All(), this,
+ this->pc());
consume_bytes(imm.length, "ref.null immediate");
index = WasmElemSegment::kNullIndex;
break;
@@ -2172,13 +2182,14 @@ ModuleResult DecodeWasmModule(
// as the {module}.
ModuleDecoderImpl decoder(enabled, module_start, module_end, origin);
v8::metrics::WasmModuleDecoded metrics_event;
- metrics::TimedScope<v8::metrics::WasmModuleDecoded> metrics_event_scope(
- &metrics_event, &v8::metrics::WasmModuleDecoded::wall_clock_time_in_us);
+ base::ElapsedTimer timer;
+ timer.Start();
ModuleResult result =
decoder.DecodeModule(counters, allocator, verify_functions);
// Record event metrics.
- metrics_event_scope.Stop();
+ metrics_event.wall_clock_duration_in_us = timer.Elapsed().InMicroseconds();
+ timer.Stop();
metrics_event.success = decoder.ok() && result.ok();
metrics_event.async = decoding_method == DecodingMethod::kAsync ||
decoding_method == DecodingMethod::kAsyncStream;
@@ -2438,14 +2449,8 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
// Extract from export table.
for (const WasmExport& exp : export_table) {
- switch (exp.kind) {
- case kExternalFunction:
- if (names->count(exp.index) == 0) {
- names->insert(std::make_pair(exp.index, exp.name));
- }
- break;
- default:
- break;
+ if (exp.kind == kExternalFunction && names->count(exp.index) == 0) {
+ names->insert(std::make_pair(exp.index, exp.name));
}
}
}
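
The validate_memory_flags() change above threads an {is_memory64} out-parameter through memory decoding and turns unknown flag bytes into an explicit error via the new default case. As a rough standalone illustration of that flag handling, the sketch below decodes a memory-limits flag byte; the numeric values (0x00-0x05 for plain/shared/memory64, each with or without a maximum) are assumed from the threads and memory64 proposals rather than taken from this diff.

// Standalone sketch of the memory-limits flag handling mirrored by
// validate_memory_flags() above. The flag values 0x00..0x05 are assumptions
// based on the Wasm threads/memory64 proposals, not values from this diff.
#include <cstdint>
#include <cstdio>

struct MemoryLimitsInfo {
  bool ok = true;
  bool has_maximum = false;
  bool is_shared = false;
  bool is_memory64 = false;
};

MemoryLimitsInfo DecodeMemoryLimitsFlags(uint8_t flags) {
  MemoryLimitsInfo info;
  switch (flags) {
    case 0x00:  // no maximum
      break;
    case 0x01:  // with maximum
      info.has_maximum = true;
      break;
    case 0x02:  // shared, no maximum
    case 0x03:  // shared, with maximum
      info.is_shared = true;
      info.has_maximum = (flags == 0x03);
      break;
    case 0x04:  // memory64, no maximum
    case 0x05:  // memory64, with maximum
      info.is_memory64 = true;
      info.has_maximum = (flags == 0x05);
      break;
    default:  // like the new default case above: unknown flags are an error
      std::printf("invalid memory limits flags 0x%x\n",
                  static_cast<unsigned>(flags));
      info.ok = false;
      break;
  }
  return info;
}

int main() {
  MemoryLimitsInfo info = DecodeMemoryLimitsFlags(0x05);
  std::printf("ok=%d shared=%d memory64=%d has_max=%d\n", info.ok,
              info.is_shared, info.is_memory64, info.has_maximum);
}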
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index d31bafb294..e8b0a4f8e6 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -58,25 +58,32 @@ uint32_t EvalUint32InitExpr(Handle<WasmInstanceObject> instance,
using ImportWrapperQueue = WrapperQueue<WasmImportWrapperCache::CacheKey,
WasmImportWrapperCache::CacheKeyHash>;
-class CompileImportWrapperTask final : public CancelableTask {
+class CompileImportWrapperJob final : public JobTask {
public:
- CompileImportWrapperTask(
- CancelableTaskManager* task_manager, WasmEngine* engine,
- Counters* counters, NativeModule* native_module,
+ CompileImportWrapperJob(
+ WasmEngine* engine, Counters* counters, NativeModule* native_module,
ImportWrapperQueue* queue,
WasmImportWrapperCache::ModificationScope* cache_scope)
- : CancelableTask(task_manager),
- engine_(engine),
+ : engine_(engine),
counters_(counters),
native_module_(native_module),
queue_(queue),
cache_scope_(cache_scope) {}
- void RunInternal() override {
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ size_t flag_limit =
+ static_cast<size_t>(std::max(1, FLAG_wasm_num_compilation_tasks));
+ // Add {worker_count} to the queue size because workers might still be
+ // processing units that have already been popped from the queue.
+ return std::min(flag_limit, worker_count + queue_->size());
+ }
+
+ void Run(JobDelegate* delegate) override {
while (base::Optional<WasmImportWrapperCache::CacheKey> key =
queue_->pop()) {
CompileImportWrapper(engine_, native_module_, counters_, key->kind,
key->signature, key->expected_arity, cache_scope_);
+ if (delegate->ShouldYield()) return;
}
}
@@ -410,10 +417,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
TimedHistogramScope wasm_instantiate_module_time_scope(SELECT_WASM_COUNTER(
isolate_->counters(), module_->origin, wasm_instantiate, module_time));
v8::metrics::WasmModuleInstantiated wasm_module_instantiated;
- metrics::TimedScope<v8::metrics::WasmModuleInstantiated>
- wasm_module_instantiated_timed_scope(
- &wasm_module_instantiated,
- &v8::metrics::WasmModuleInstantiated::wall_clock_time_in_us);
+ base::ElapsedTimer timer;
+ timer.Start();
NativeModule* native_module = module_object_->native_module();
//--------------------------------------------------------------------------
@@ -745,7 +750,9 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
TRACE("Successfully built instance for module %p\n",
module_object_->native_module());
wasm_module_instantiated.success = true;
- wasm_module_instantiated_timed_scope.Stop();
+ wasm_module_instantiated.wall_clock_duration_in_us =
+ timer.Elapsed().InMicroseconds();
+ timer.Stop();
isolate_->metrics_recorder()->DelayMainThreadEvent(wasm_module_instantiated,
context_id_);
return instance;
@@ -1074,8 +1081,7 @@ bool InstanceBuilder::ProcessImportedFunction(
// The imported function is a callable.
int expected_arity = static_cast<int>(expected_sig->parameter_count());
- if (kind ==
- compiler::WasmImportCallKind::kJSFunctionArityMismatchSkipAdaptor) {
+ if (kind == compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(js_receiver);
SharedFunctionInfo shared = function->shared();
expected_arity = shared.internal_formal_parameter_count();
@@ -1450,7 +1456,7 @@ void InstanceBuilder::CompileImportWrappers(
int expected_arity = static_cast<int>(sig->parameter_count());
if (resolved.first ==
- compiler::WasmImportCallKind::kJSFunctionArityMismatchSkipAdaptor) {
+ compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(resolved.second);
SharedFunctionInfo shared = function->shared();
expected_arity = shared.internal_formal_parameter_count();
@@ -1464,24 +1470,14 @@ void InstanceBuilder::CompileImportWrappers(
import_wrapper_queue.insert(key);
}
- CancelableTaskManager task_manager;
- // TODO(wasm): Switch this to the Jobs API.
- const int max_background_tasks = GetMaxCompileConcurrency();
- for (int i = 0; i < max_background_tasks; ++i) {
- auto task = std::make_unique<CompileImportWrapperTask>(
- &task_manager, isolate_->wasm_engine(), isolate_->counters(),
- native_module, &import_wrapper_queue, &cache_scope);
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
- }
+ auto compile_job_task = std::make_unique<CompileImportWrapperJob>(
+ isolate_->wasm_engine(), isolate_->counters(), native_module,
+ &import_wrapper_queue, &cache_scope);
+ auto compile_job = V8::GetCurrentPlatform()->PostJob(
+ TaskPriority::kUserVisible, std::move(compile_job_task));
- // Also compile in the current thread, in case there are no worker threads.
- while (base::Optional<WasmImportWrapperCache::CacheKey> key =
- import_wrapper_queue.pop()) {
- CompileImportWrapper(isolate_->wasm_engine(), native_module,
- isolate_->counters(), key->kind, key->signature,
- key->expected_arity, &cache_scope);
- }
- task_manager.CancelAndWait();
+ // Wait for the job to finish, while contributing in this thread.
+ compile_job->Join();
}
// Process the imports, including functions, tables, globals, and memory, in
@@ -1947,7 +1943,7 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
// Update the local dispatch table first if necessary.
if (IsSubtypeOf(table_object->type(), kWasmFuncRef, module)) {
- uint32_t sig_id = module->signature_ids[function->sig_index];
+ uint32_t sig_id = module->canonicalized_type_ids[function->sig_index];
IndirectFunctionTableEntry(instance, table_index, entry_index)
.Set(sig_id, instance, func_index);
}
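
The import-wrapper change above replaces the hand-rolled CancelableTask loop with the platform Jobs API: a single JobTask drains a shared queue, GetMaxConcurrency() advertises the remaining work plus the workers already running, and the instantiating thread contributes by calling Join() on the returned handle. The sketch below shows that shape against the public v8::JobTask / Platform::PostJob interface; the WorkQueue type and the printed "units" are placeholders, and it assumes linking against v8_libplatform so that NewDefaultPlatform() provides a PostJob() implementation.

// Minimal sketch of the JobTask pattern used by CompileImportWrapperJob above.
// WorkQueue and the printed "units" are invented; only v8::JobTask,
// v8::JobDelegate, v8::Platform::PostJob and v8::JobHandle are real API.
#include <libplatform/libplatform.h>
#include <v8-platform.h>

#include <atomic>
#include <cstdio>
#include <memory>

class WorkQueue {
 public:
  explicit WorkQueue(int n) : remaining_(n) {}
  // Returns a fake unit index, or -1 once the queue is drained.
  int Pop() {
    int left = remaining_.fetch_sub(1, std::memory_order_relaxed);
    return left > 0 ? left : -1;
  }
  size_t size() const {
    int left = remaining_.load(std::memory_order_relaxed);
    return left > 0 ? static_cast<size_t>(left) : 0;
  }

 private:
  std::atomic<int> remaining_;
};

class DrainQueueJob final : public v8::JobTask {
 public:
  explicit DrainQueueJob(WorkQueue* queue) : queue_(queue) {}

  void Run(v8::JobDelegate* delegate) override {
    for (int unit = queue_->Pop(); unit >= 0; unit = queue_->Pop()) {
      std::printf("compiled unit %d\n", unit);  // stand-in for real work
      if (delegate->ShouldYield()) return;      // cooperate with the scheduler
    }
  }

  size_t GetMaxConcurrency(size_t worker_count) const override {
    // Add {worker_count} because workers may still be processing units they
    // already popped, mirroring the overrides in the diff above.
    return queue_->size() + worker_count;
  }

 private:
  WorkQueue* const queue_;
};

int main() {
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  WorkQueue queue(100);
  std::unique_ptr<v8::JobHandle> handle = platform->PostJob(
      v8::TaskPriority::kUserVisible, std::make_unique<DrainQueueJob>(&queue));
  handle->Join();  // contribute on this thread and wait, like Build() above
}

Join() both blocks and runs the job on the calling thread, which is what lets CompileImportWrappers() drop its explicit "also compile in the current thread" loop.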
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index c9f984aaee..d1312edd33 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -28,6 +28,8 @@ namespace wasm {
class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
public:
explicit AsyncStreamingDecoder(std::unique_ptr<StreamingProcessor> processor);
+ AsyncStreamingDecoder(const AsyncStreamingDecoder&) = delete;
+ AsyncStreamingDecoder& operator=(const AsyncStreamingDecoder&) = delete;
// The buffer passed into OnBytesReceived is owned by the caller.
void OnBytesReceived(Vector<const uint8_t> bytes) override;
@@ -218,8 +220,6 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
// We need wire bytes in an array for deserializing cached modules.
std::vector<uint8_t> wire_bytes_for_deserializing_;
-
- DISALLOW_COPY_AND_ASSIGN(AsyncStreamingDecoder);
};
void AsyncStreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
@@ -517,10 +517,6 @@ size_t AsyncStreamingDecoder::DecodeVarInt32::ReadBytes(
Decoder decoder(buf,
streaming->module_offset() - static_cast<uint32_t>(offset()));
value_ = decoder.consume_u32v(field_name_);
- // The number of bytes we actually needed to read.
- DCHECK_GT(decoder.pc(), buffer().begin());
- bytes_consumed_ = static_cast<size_t>(decoder.pc() - buf.begin());
- TRACE_STREAMING(" ==> %zu bytes consumed\n", bytes_consumed_);
if (decoder.failed()) {
if (new_bytes == remaining_buf.size()) {
@@ -531,6 +527,11 @@ size_t AsyncStreamingDecoder::DecodeVarInt32::ReadBytes(
return new_bytes;
}
+ // The number of bytes we actually needed to read.
+ DCHECK_GT(decoder.pc(), buffer().begin());
+ bytes_consumed_ = static_cast<size_t>(decoder.pc() - buf.begin());
+ TRACE_STREAMING(" ==> %zu bytes consumed\n", bytes_consumed_);
+
// We read all the bytes we needed.
DCHECK_GT(bytes_consumed_, offset());
new_bytes = bytes_consumed_ - offset();
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 2e9a2a8d06..3731511c24 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -178,38 +178,7 @@ class ValueType {
#undef DEF_ENUM
};
- constexpr bool is_reference_type() const {
- return kind() == kRef || kind() == kOptRef || kind() == kRtt;
- }
-
- constexpr bool is_object_reference_type() const {
- return kind() == kRef || kind() == kOptRef;
- }
-
- constexpr bool is_packed() const { return kind() == kI8 || kind() == kI16; }
-
- constexpr bool is_nullable() const { return kind() == kOptRef; }
-
- constexpr bool is_reference_to(uint32_t htype) const {
- return (kind() == kRef || kind() == kOptRef) &&
- heap_representation() == htype;
- }
-
- constexpr bool is_defaultable() const {
- CONSTEXPR_DCHECK(kind() != kBottom && kind() != kStmt);
- return kind() != kRef && kind() != kRtt;
- }
-
- constexpr ValueType Unpacked() const {
- return is_packed() ? Primitive(kI32) : *this;
- }
-
- constexpr bool has_index() const {
- return is_reference_type() && heap_type().is_index();
- }
- constexpr bool is_rtt() const { return kind() == kRtt; }
- constexpr bool has_depth() const { return is_rtt(); }
-
+ /******************************* Constructors *******************************/
constexpr ValueType() : bit_field_(KindField::encode(kStmt)) {}
static constexpr ValueType Primitive(Kind kind) {
CONSTEXPR_DCHECK(kind == kBottom || kind <= kI16);
@@ -242,6 +211,43 @@ class ValueType {
return ValueType(bit_field);
}
+ /******************************** Type checks *******************************/
+ constexpr bool is_reference_type() const {
+ return kind() == kRef || kind() == kOptRef || kind() == kRtt;
+ }
+
+ constexpr bool is_object_reference_type() const {
+ return kind() == kRef || kind() == kOptRef;
+ }
+
+ constexpr bool is_nullable() const { return kind() == kOptRef; }
+
+ constexpr bool is_reference_to(uint32_t htype) const {
+ return (kind() == kRef || kind() == kOptRef) &&
+ heap_representation() == htype;
+ }
+
+ constexpr bool is_rtt() const { return kind() == kRtt; }
+ constexpr bool has_depth() const { return is_rtt(); }
+
+ constexpr bool has_index() const {
+ return is_reference_type() && heap_type().is_index();
+ }
+
+ constexpr bool is_defaultable() const {
+ CONSTEXPR_DCHECK(kind() != kBottom && kind() != kStmt);
+ return kind() != kRef && kind() != kRtt;
+ }
+
+ constexpr bool is_bottom() const { return kind() == kBottom; }
+
+ constexpr bool is_packed() const { return kind() == kI8 || kind() == kI16; }
+
+ constexpr ValueType Unpacked() const {
+ return is_packed() ? Primitive(kI32) : *this;
+ }
+
+ /***************************** Field Accessors ******************************/
constexpr Kind kind() const { return KindField::decode(bit_field_); }
constexpr HeapType::Representation heap_representation() const {
CONSTEXPR_DCHECK(is_reference_type());
@@ -263,6 +269,14 @@ class ValueType {
// Useful when serializing this type to store it into a runtime object.
constexpr uint32_t raw_bit_field() const { return bit_field_; }
+ /*************************** Other utility methods **************************/
+ constexpr bool operator==(ValueType other) const {
+ return bit_field_ == other.bit_field_;
+ }
+ constexpr bool operator!=(ValueType other) const {
+ return bit_field_ != other.bit_field_;
+ }
+
static constexpr size_t bit_field_offset() {
return offsetof(ValueType, bit_field_);
}
@@ -292,13 +306,7 @@ class ValueType {
return size;
}
- constexpr bool operator==(ValueType other) const {
- return bit_field_ == other.bit_field_;
- }
- constexpr bool operator!=(ValueType other) const {
- return bit_field_ != other.bit_field_;
- }
-
+ /*************************** Machine-type related ***************************/
constexpr MachineType machine_type() const {
CONSTEXPR_DCHECK(kBottom != kind());
@@ -316,6 +324,29 @@ class ValueType {
return machine_type().representation();
}
+ static ValueType For(MachineType type) {
+ switch (type.representation()) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return Primitive(kI32);
+ case MachineRepresentation::kWord64:
+ return Primitive(kI64);
+ case MachineRepresentation::kFloat32:
+ return Primitive(kF32);
+ case MachineRepresentation::kFloat64:
+ return Primitive(kF64);
+ case MachineRepresentation::kTaggedPointer:
+ return Ref(HeapType::kExtern, kNullable);
+ case MachineRepresentation::kSimd128:
+ return Primitive(kS128);
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ /********************************* Encoding *********************************/
+
// Returns the first byte of this type's representation in the wasm binary
// format.
// For compatibility with the reftypes and exception-handling proposals, this
@@ -365,27 +396,9 @@ class ValueType {
heap_representation() == HeapType::kI31));
}
- static ValueType For(MachineType type) {
- switch (type.representation()) {
- case MachineRepresentation::kWord8:
- case MachineRepresentation::kWord16:
- case MachineRepresentation::kWord32:
- return Primitive(kI32);
- case MachineRepresentation::kWord64:
- return Primitive(kI64);
- case MachineRepresentation::kFloat32:
- return Primitive(kF32);
- case MachineRepresentation::kFloat64:
- return Primitive(kF64);
- case MachineRepresentation::kTaggedPointer:
- return Ref(HeapType::kExtern, kNullable);
- case MachineRepresentation::kSimd128:
- return Primitive(kS128);
- default:
- UNREACHABLE();
- }
- }
+ static constexpr int kLastUsedBit = 30;
+ /****************************** Pretty-printing *****************************/
constexpr char short_name() const {
constexpr char kShortName[] = {
#define SHORT_NAME(kind, log2Size, code, machineType, shortName, ...) shortName,
@@ -425,8 +438,6 @@ class ValueType {
return buf.str();
}
- static constexpr int kLastUsedBit = 30;
-
private:
// We only use 31 bits so ValueType fits in a Smi. This can be changed if
// needed.
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index ac68dc970c..cd90524599 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -323,8 +323,12 @@ void WasmCode::Validate() const {
void WasmCode::MaybePrint(const char* name) const {
// Determines whether flags want this code to be printed.
- if ((FLAG_print_wasm_code && kind() == kFunction) ||
- (FLAG_print_wasm_stub_code && kind() != kFunction) || FLAG_print_code) {
+ bool function_index_matches =
+ (!IsAnonymous() &&
+ FLAG_print_wasm_code_function_index == static_cast<int>(index()));
+ if (FLAG_print_code ||
+ (kind() == kFunction ? (FLAG_print_wasm_code || function_index_matches)
+ : FLAG_print_wasm_stub_code)) {
Print(name);
}
}
@@ -854,7 +858,7 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
void NativeModule::LogWasmCodes(Isolate* isolate) {
if (!WasmCode::ShouldBeLogged(isolate)) return;
- TRACE_EVENT1("v8.wasm", "wasm.LogWasmCodes", "num_functions",
+ TRACE_EVENT1("v8.wasm", "wasm.LogWasmCodes", "functions",
module_->num_declared_functions);
// TODO(titzer): we skip the logging of the import wrappers
@@ -874,11 +878,7 @@ CompilationEnv NativeModule::CreateCompilationEnv() const {
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
CODE_SPACE_WRITE_SCOPE
- // For off-heap builtins, we create a copy of the off-heap instruction stream
- // instead of the on-heap code object containing the trampoline. Ensure that
- // we do not apply the on-heap reloc info to the off-heap instructions.
- const size_t relocation_size =
- code->is_off_heap_trampoline() ? 0 : code->relocation_size();
+ const size_t relocation_size = code->relocation_size();
OwnedVector<byte> reloc_info;
if (relocation_size > 0) {
reloc_info = OwnedVector<byte>::Of(
@@ -892,19 +892,25 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
source_pos_table->copy_out(0, source_pos.start(),
source_pos_table->length());
}
+ CHECK(!code->is_off_heap_trampoline());
+ STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
Vector<const byte> instructions(
- reinterpret_cast<byte*>(code->InstructionStart()),
- static_cast<size_t>(code->InstructionSize()));
+ reinterpret_cast<byte*>(code->raw_body_start()),
+ static_cast<size_t>(code->raw_body_size()));
const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
+ // Metadata offsets in Code objects are relative to the start of the metadata
+ // section, whereas WasmCode expects offsets relative to InstructionStart.
+ const int base_offset = code->raw_instruction_size();
// TODO(jgruber,v8:8758): Remove this translation. It exists only because
// Code objects contains real offsets but WasmCode expects an offset of 0 to
// mean 'empty'.
const int safepoint_table_offset =
- code->has_safepoint_table() ? code->safepoint_table_offset() : 0;
- const int handler_table_offset = code->handler_table_offset();
- const int constant_pool_offset = code->constant_pool_offset();
- const int code_comments_offset = code->code_comments_offset();
+ code->has_safepoint_table() ? base_offset + code->safepoint_table_offset()
+ : 0;
+ const int handler_table_offset = base_offset + code->handler_table_offset();
+ const int constant_pool_offset = base_offset + code->constant_pool_offset();
+ const int code_comments_offset = base_offset + code->code_comments_offset();
Vector<uint8_t> dst_code_bytes =
code_allocator_.AllocateForCode(this, instructions.size());
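
Editor's note: a small numeric sketch of the offset rebasing in the hunk above; all constants are made up for illustration.

// Illustrative only, not part of the patch.
constexpr int kRawInstructionSize = 0x100;   // code->raw_instruction_size()
constexpr int kSafepointTableOffset = 0x20;  // relative to the metadata start
// WasmCode wants the offset relative to InstructionStart, so it is rebased:
constexpr int kTranslatedOffset = kRawInstructionSize + kSafepointTableOffset;
static_assert(kTranslatedOffset == 0x120, "metadata offset rebased onto code");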
@@ -912,7 +918,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
// Apply the relocation delta by iterating over the RelocInfo.
intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
- code->InstructionStart();
+ code->raw_instruction_start();
int mode_mask =
RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
auto jump_tables_ref =
@@ -1081,12 +1087,16 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
}
WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.PublishCode");
base::MutexGuard lock(&allocation_mutex_);
return PublishCodeLocked(std::move(code));
}
std::vector<WasmCode*> NativeModule::PublishCode(
Vector<std::unique_ptr<WasmCode>> codes) {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.PublishCode", "number", codes.size());
std::vector<WasmCode*> published_code;
published_code.reserve(codes.size());
base::MutexGuard lock(&allocation_mutex_);
@@ -1362,10 +1372,10 @@ void NativeModule::AddCodeSpace(
WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
#undef RUNTIME_STUB
#undef RUNTIME_STUB_TRAP
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
Address builtin_addresses[WasmCode::kRuntimeStubCount];
for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
Builtins::Name builtin = stub_names[i];
- CHECK(embedded_data.ContainsBuiltin(builtin));
builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
}
JumpTableAssembler::GenerateFarJumpTable(
@@ -1468,7 +1478,11 @@ NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
size_t max_distance = std::max(
code_region.end() > table_start ? code_region.end() - table_start : 0,
table_end > code_region.begin() ? table_end - code_region.begin() : 0);
- return max_distance < WasmCodeAllocator::kMaxCodeSpaceSize;
+ // We can allow a max_distance that is equal to kMaxCodeSpaceSize, because
+ // every call or jump will target an address *within* the region, but never
+  // exactly the end of the region. So all occurring offsets are actually
+ // smaller than max_distance.
+ return max_distance <= WasmCodeAllocator::kMaxCodeSpaceSize;
};
// Fast path: Try to use {main_jump_table_} and {main_far_jump_table_}.
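
Editor's note: a sketch of the boundary argument in the comment above, with made-up sizes; the real check also considers the far jump table and both distance directions.

// Illustrative only, not part of the patch.
constexpr size_t kMax = 1024;  // stand-in for WasmCodeAllocator::kMaxCodeSpaceSize
constexpr size_t kTableStart = 0;
constexpr size_t kRegionBegin = 0;
constexpr size_t kRegionEnd = kRegionBegin + kMax;  // exactly kMax away
constexpr size_t kMaxDistance = kRegionEnd - kTableStart;
static_assert(kMaxDistance <= kMax, "region is still accepted");
// Every real target t satisfies kRegionBegin <= t < kRegionEnd, so
// t - kTableStart < kMax: all occurring offsets stay strictly below the limit.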
@@ -1881,6 +1895,8 @@ std::unique_ptr<WasmCode> NativeModule::AddCompiledCode(
std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
Vector<WasmCompilationResult> results) {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.AddCompiledCode", "num", results.size());
DCHECK(!results.empty());
// First, allocate code space for all the results.
size_t total_code_space = 0;
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 5e8ed5475b..f017b977b5 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -57,6 +57,7 @@ struct WasmModule;
V(WasmFloat64ToNumber) \
V(WasmTaggedToFloat64) \
V(WasmAllocateJSArray) \
+ V(WasmAllocatePair) \
V(WasmAtomicNotify) \
V(WasmI32AtomicWait32) \
V(WasmI32AtomicWait64) \
@@ -200,6 +201,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
static bool ShouldBeLogged(Isolate* isolate);
void LogCode(Isolate* isolate) const;
+ WasmCode(const WasmCode&) = delete;
+ WasmCode& operator=(const WasmCode&) = delete;
~WasmCode();
void IncRef() {
@@ -348,8 +351,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
// from (3) and all (2)), the code object is deleted and the memory for the
// machine code is freed.
std::atomic<int> ref_count_{1};
-
- DISALLOW_COPY_AND_ASSIGN(WasmCode);
};
// Check that {WasmCode} objects are sufficiently small. We create many of them,
@@ -476,6 +477,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = false;
#endif
+ NativeModule(const NativeModule&) = delete;
+ NativeModule& operator=(const NativeModule&) = delete;
+ ~NativeModule();
+
// {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
// code below, i.e. it can be called concurrently from background threads.
// The returned code still needs to be published via {PublishCode}.
@@ -612,8 +617,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
return import_wrapper_cache_.get();
}
- ~NativeModule();
-
const WasmFeatures& enabled_features() const { return enabled_features_; }
// Returns the runtime stub id that corresponds to the given address (which
@@ -794,13 +797,13 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::atomic<size_t> liftoff_bailout_count_{0};
std::atomic<size_t> liftoff_code_size_{0};
std::atomic<size_t> turbofan_code_size_{0};
-
- DISALLOW_COPY_AND_ASSIGN(NativeModule);
};
class V8_EXPORT_PRIVATE WasmCodeManager final {
public:
explicit WasmCodeManager(size_t max_committed);
+ WasmCodeManager(const WasmCodeManager&) = delete;
+ WasmCodeManager& operator=(const WasmCodeManager&) = delete;
#ifdef DEBUG
~WasmCodeManager() {
@@ -872,8 +875,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// End of fields protected by {native_modules_mutex_}.
//////////////////////////////////////////////////////////////////////////////
-
- DISALLOW_COPY_AND_ASSIGN(WasmCodeManager);
};
// Within the scope, the native_module is writable and not executable.
@@ -901,6 +902,8 @@ class NativeModuleModificationScope final {
class V8_EXPORT_PRIVATE WasmCodeRefScope {
public:
WasmCodeRefScope();
+ WasmCodeRefScope(const WasmCodeRefScope&) = delete;
+ WasmCodeRefScope& operator=(const WasmCodeRefScope&) = delete;
~WasmCodeRefScope();
// Register a {WasmCode} reference in the current {WasmCodeRefScope}. Fails if
@@ -910,8 +913,6 @@ class V8_EXPORT_PRIVATE WasmCodeRefScope {
private:
WasmCodeRefScope* const previous_scope_;
std::unordered_set<WasmCode*> code_ptrs_;
-
- DISALLOW_COPY_AND_ASSIGN(WasmCodeRefScope);
};
// Similarly to a global handle, a {GlobalWasmCodeRef} stores a single
@@ -924,6 +925,9 @@ class GlobalWasmCodeRef {
code_->IncRef();
}
+ GlobalWasmCodeRef(const GlobalWasmCodeRef&) = delete;
+ GlobalWasmCodeRef& operator=(const GlobalWasmCodeRef&) = delete;
+
~GlobalWasmCodeRef() { WasmCode::DecrementRefCount({&code_, 1}); }
// Get a pointer to the contained {WasmCode} object. This is only guaranteed
@@ -934,7 +938,6 @@ class GlobalWasmCodeRef {
WasmCode* const code_;
// Also keep the {NativeModule} alive.
const std::shared_ptr<NativeModule> native_module_;
- DISALLOW_COPY_AND_ASSIGN(GlobalWasmCodeRef);
};
const char* GetRuntimeStubName(WasmCode::RuntimeStubId);
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 4e701599fc..31a519ee2e 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -123,6 +123,11 @@ constexpr uint32_t kExceptionAttribute = 0;
constexpr int kAnonymousFuncIndex = -1;
+// The number of calls to an exported wasm function that will be handled
+// by the generic wrapper. Once this threshold is reached, a specific wrapper
+// is to be compiled for the function's signature.
+constexpr uint32_t kGenericWrapperThreshold = 6;
+
} // namespace wasm
} // namespace internal
} // namespace v8
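
Editor's note: a hedged sketch of the budgeting this constant implies; the struct and helper names are hypothetical, and the actual bookkeeping lives in the generic-wrapper runtime support.

// Illustrative only, not part of the patch.
#include <cstdint>

struct ExportedFunctionState {
  uint32_t remaining_generic_calls = kGenericWrapperThreshold;
};

bool ShouldCompileSpecificWrapper(ExportedFunctionState* state) {
  // The first kGenericWrapperThreshold calls are served by the generic
  // wrapper; after that, a wrapper specialized to the signature is compiled.
  if (state->remaining_generic_calls == 0) return true;
  --state->remaining_generic_calls;
  return false;
}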
diff --git a/deps/v8/src/wasm/wasm-debug-evaluate.cc b/deps/v8/src/wasm/wasm-debug-evaluate.cc
index d8abe49679..bbd75f6b18 100644
--- a/deps/v8/src/wasm/wasm-debug-evaluate.cc
+++ b/deps/v8/src/wasm/wasm-debug-evaluate.cc
@@ -81,7 +81,7 @@ static bool CheckRangeOutOfBounds(uint32_t offset, uint32_t size,
class DebugEvaluatorProxy {
public:
- explicit DebugEvaluatorProxy(Isolate* isolate, StandardFrame* frame)
+ explicit DebugEvaluatorProxy(Isolate* isolate, CommonFrame* frame)
: isolate_(isolate), frame_(frame) {}
static void GetMemoryTrampoline(
@@ -283,7 +283,7 @@ class DebugEvaluatorProxy {
}
Isolate* isolate_;
- StandardFrame* frame_;
+ CommonFrame* frame_;
Handle<WasmInstanceObject> evaluator_;
Handle<WasmInstanceObject> debuggee_;
};
@@ -356,7 +356,7 @@ static bool VerifyEvaluatorInterface(const WasmModule* raw_module,
Maybe<std::string> DebugEvaluateImpl(
Vector<const byte> snippet, Handle<WasmInstanceObject> debuggee_instance,
- StandardFrame* frame) {
+ CommonFrame* frame) {
Isolate* isolate = debuggee_instance->GetIsolate();
HandleScope handle_scope(isolate);
WasmEngine* engine = isolate->wasm_engine();
@@ -433,7 +433,7 @@ Maybe<std::string> DebugEvaluateImpl(
MaybeHandle<String> DebugEvaluate(Vector<const byte> snippet,
Handle<WasmInstanceObject> debuggee_instance,
- StandardFrame* frame) {
+ CommonFrame* frame) {
Maybe<std::string> result =
DebugEvaluateImpl(snippet, debuggee_instance, frame);
if (result.IsNothing()) return {};
diff --git a/deps/v8/src/wasm/wasm-debug-evaluate.h b/deps/v8/src/wasm/wasm-debug-evaluate.h
index f4e3aef175..ab84a736a8 100644
--- a/deps/v8/src/wasm/wasm-debug-evaluate.h
+++ b/deps/v8/src/wasm/wasm-debug-evaluate.h
@@ -13,9 +13,9 @@ namespace v8 {
namespace internal {
namespace wasm {
-MaybeHandle<String> V8_EXPORT_PRIVATE DebugEvaluate(
- Vector<const byte> snippet, Handle<WasmInstanceObject> debuggee_instance,
- StandardFrame* frame);
+MaybeHandle<String> V8_EXPORT_PRIVATE
+DebugEvaluate(Vector<const byte> snippet,
+ Handle<WasmInstanceObject> debuggee_instance, CommonFrame* frame);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index d05caa4144..5da5525045 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -51,60 +51,6 @@ Handle<String> PrintFToOneByteString(Isolate* isolate, const char* format,
: isolate->factory()->NewStringFromOneByte(name).ToHandleChecked();
}
-MaybeHandle<JSObject> CreateFunctionTablesObject(
- Handle<WasmInstanceObject> instance) {
- Isolate* isolate = instance->GetIsolate();
- auto tables = handle(instance->tables(), isolate);
- if (tables->length() == 0) return MaybeHandle<JSObject>();
-
- const char* table_label = "table%d";
- Handle<JSObject> tables_obj = isolate->factory()->NewJSObjectWithNullProto();
- for (int table_index = 0; table_index < tables->length(); ++table_index) {
- auto func_table =
- handle(WasmTableObject::cast(tables->get(table_index)), isolate);
- if (!IsSubtypeOf(func_table->type(), kWasmFuncRef, instance->module()))
- continue;
-
- Handle<String> table_name;
- if (!WasmInstanceObject::GetTableNameOrNull(isolate, instance, table_index)
- .ToHandle(&table_name)) {
- table_name =
- PrintFToOneByteString<true>(isolate, table_label, table_index);
- }
-
- Handle<JSObject> func_table_obj =
- isolate->factory()->NewJSObjectWithNullProto();
- JSObject::AddProperty(isolate, tables_obj, table_name, func_table_obj,
- NONE);
- for (int i = 0; i < func_table->current_length(); ++i) {
- Handle<Object> func = WasmTableObject::Get(isolate, func_table, i);
- DCHECK(!WasmCapiFunction::IsWasmCapiFunction(*func));
- if (func->IsNull(isolate)) continue;
-
- Handle<String> func_name;
- Handle<JSObject> func_obj =
- isolate->factory()->NewJSObjectWithNullProto();
-
- if (WasmExportedFunction::IsWasmExportedFunction(*func)) {
- auto target_func = Handle<WasmExportedFunction>::cast(func);
- auto target_instance = handle(target_func->instance(), isolate);
- auto module = handle(target_instance->module_object(), isolate);
- func_name = WasmModuleObject::GetFunctionName(
- isolate, module, target_func->function_index());
- } else if (WasmJSFunction::IsWasmJSFunction(*func)) {
- auto target_func = Handle<JSFunction>::cast(func);
- func_name = JSFunction::GetName(target_func);
- if (func_name->length() == 0) {
- func_name = isolate->factory()->InternalizeUtf8String("anonymous");
- }
- }
- JSObject::AddProperty(isolate, func_obj, func_name, func, NONE);
- JSObject::AddDataElement(func_table_obj, i, func_obj, NONE);
- }
- }
- return tables_obj;
-}
-
Handle<Object> WasmValueToValueObject(Isolate* isolate, WasmValue value) {
Handle<ByteArray> bytes;
switch (value.type().kind()) {
@@ -164,8 +110,8 @@ MaybeHandle<String> GetLocalNameString(Isolate* isolate,
ModuleWireBytes wire_bytes{native_module->wire_bytes()};
// Bounds were checked during decoding.
DCHECK(wire_bytes.BoundsCheck(name_ref));
- Vector<const char> name = wire_bytes.GetNameOrNull(name_ref);
- if (name.begin() == nullptr) return {};
+ WasmName name = wire_bytes.GetNameOrNull(name_ref);
+ if (name.size() == 0) return {};
return isolate->factory()->NewStringFromUtf8(name);
}
@@ -272,14 +218,6 @@ Handle<JSObject> GetModuleScopeObject(Handle<WasmInstanceObject> instance) {
NONE);
}
- Handle<JSObject> function_tables_obj;
- if (CreateFunctionTablesObject(instance).ToHandle(&function_tables_obj)) {
- Handle<String> tables_name = isolate->factory()->InternalizeString(
- StaticCharVector("function tables"));
- JSObject::AddProperty(isolate, module_scope_object, tables_name,
- function_tables_obj, NONE);
- }
-
auto& globals = instance->module()->globals;
if (globals.size() > 0) {
Handle<JSObject> globals_obj =
@@ -310,6 +248,9 @@ class DebugInfoImpl {
explicit DebugInfoImpl(NativeModule* native_module)
: native_module_(native_module) {}
+ DebugInfoImpl(const DebugInfoImpl&) = delete;
+ DebugInfoImpl& operator=(const DebugInfoImpl&) = delete;
+
int GetNumLocals(Address pc) {
FrameInspectionScope scope(this, pc);
if (!scope.is_inspectable()) return 0;
@@ -340,6 +281,12 @@ class DebugInfoImpl {
debug_break_fp);
}
+ const WasmFunction& GetFunctionAtAddress(Address pc) {
+ FrameInspectionScope scope(this, pc);
+ auto* module = native_module_->module();
+ return module->functions[scope.code->index()];
+ }
+
Handle<JSObject> GetLocalScopeObject(Isolate* isolate, Address pc, Address fp,
Address debug_break_fp) {
FrameInspectionScope scope(this, pc);
@@ -886,8 +833,6 @@ class DebugInfoImpl {
// Isolate-specific data.
std::unordered_map<Isolate*, PerIsolateDebugData> per_isolate_data_;
-
- DISALLOW_COPY_AND_ASSIGN(DebugInfoImpl);
};
DebugInfo::DebugInfo(NativeModule* native_module)
@@ -909,6 +854,10 @@ WasmValue DebugInfo::GetStackValue(int index, Address pc, Address fp,
return impl_->GetStackValue(index, pc, fp, debug_break_fp);
}
+const wasm::WasmFunction& DebugInfo::GetFunctionAtAddress(Address pc) {
+ return impl_->GetFunctionAtAddress(pc);
+}
+
Handle<JSObject> DebugInfo::GetLocalScopeObject(Isolate* isolate, Address pc,
Address fp,
Address debug_break_fp) {
diff --git a/deps/v8/src/wasm/wasm-debug.h b/deps/v8/src/wasm/wasm-debug.h
index 6050cb3a58..82fe974952 100644
--- a/deps/v8/src/wasm/wasm-debug.h
+++ b/deps/v8/src/wasm/wasm-debug.h
@@ -34,6 +34,7 @@ class NativeModule;
class WasmCode;
class WireBytesRef;
class WasmValue;
+struct WasmFunction;
// Side table storing information used to inspect Liftoff frames at runtime.
// This table is only created on demand for debugging, so it is not optimized
@@ -153,6 +154,9 @@ class V8_EXPORT_PRIVATE DebugInfo {
WasmValue GetLocalValue(int local, Address pc, Address fp,
Address debug_break_fp);
int GetStackDepth(Address pc);
+
+ const wasm::WasmFunction& GetFunctionAtAddress(Address pc);
+
WasmValue GetStackValue(int index, Address pc, Address fp,
Address debug_break_fp);
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 9699516c27..9f962f76bd 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -419,7 +419,7 @@ WasmEngine::~WasmEngine() {
compile_job_handles = compile_job_handles_;
}
for (auto& job_handle : compile_job_handles) {
- if (job_handle->IsRunning()) job_handle->Cancel();
+ if (job_handle->IsValid()) job_handle->Cancel();
}
// All AsyncCompileJobs have been canceled.
@@ -1036,8 +1036,7 @@ void WasmEngine::LogOutstandingCodesForIsolate(Isolate* isolate) {
DCHECK_EQ(1, isolates_.count(isolate));
code_to_log.swap(isolates_[isolate]->code_to_log);
}
- TRACE_EVENT1("v8.wasm", "wasm.LogCode", "num_code_objects",
- code_to_log.size());
+ TRACE_EVENT1("v8.wasm", "wasm.LogCode", "codeObjects", code_to_log.size());
if (code_to_log.empty()) return;
for (WasmCode* code : code_to_log) {
code->LogCode(isolate);
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 2d96111462..a38308110b 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -137,6 +137,8 @@ class NativeModuleCache {
class V8_EXPORT_PRIVATE WasmEngine {
public:
WasmEngine();
+ WasmEngine(const WasmEngine&) = delete;
+ WasmEngine& operator=(const WasmEngine&) = delete;
~WasmEngine();
// Synchronously validates the given bytes that represent an encoded Wasm
@@ -413,8 +415,6 @@ class V8_EXPORT_PRIVATE WasmEngine {
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
-
- DISALLOW_COPY_AND_ASSIGN(WasmEngine);
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index de1dd5e9df..e8e8cf8d50 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -5,12 +5,13 @@
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
+
#include <limits>
#include "include/v8config.h"
-
#include "src/base/bits.h"
#include "src/base/ieee754.h"
+#include "src/base/safe_conversions.h"
#include "src/common/assert-scope.h"
#include "src/utils/memcopy.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -179,12 +180,8 @@ void uint64_to_float64_wrapper(Address data) {
}
int32_t float32_to_int64_wrapper(Address data) {
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
float input = ReadUnalignedValue<float>(data);
- if (input >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
- input < static_cast<float>(std::numeric_limits<int64_t>::max())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(input)) {
WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
return 1;
}
@@ -193,11 +190,7 @@ int32_t float32_to_int64_wrapper(Address data) {
int32_t float32_to_uint64_wrapper(Address data) {
float input = ReadUnalignedValue<float>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within uint64 range which are actually
- // not within uint64 range.
- if (input > -1.0 &&
- input < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
+ if (base::IsValueInRangeForNumericType<uint64_t>(input)) {
WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
return 1;
}
@@ -205,12 +198,8 @@ int32_t float32_to_uint64_wrapper(Address data) {
}
int32_t float64_to_int64_wrapper(Address data) {
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
double input = ReadUnalignedValue<double>(data);
- if (input >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
- input < static_cast<double>(std::numeric_limits<int64_t>::max())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(input)) {
WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
return 1;
}
@@ -218,12 +207,8 @@ int32_t float64_to_int64_wrapper(Address data) {
}
int32_t float64_to_uint64_wrapper(Address data) {
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within uint64 range which are actually
- // not within uint64 range.
double input = ReadUnalignedValue<double>(data);
- if (input > -1.0 &&
- input < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
+ if (base::IsValueInRangeForNumericType<uint64_t>(input)) {
WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
return 1;
}
@@ -232,11 +217,7 @@ int32_t float64_to_uint64_wrapper(Address data) {
void float32_to_int64_sat_wrapper(Address data) {
float input = ReadUnalignedValue<float>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
- if (input < static_cast<float>(std::numeric_limits<int64_t>::max()) &&
- input >= static_cast<float>(std::numeric_limits<int64_t>::min())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(input)) {
WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
return;
}
@@ -253,11 +234,7 @@ void float32_to_int64_sat_wrapper(Address data) {
void float32_to_uint64_sat_wrapper(Address data) {
float input = ReadUnalignedValue<float>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within uint64 range which are actually
- // not within uint64 range.
- if (input < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
- input >= 0.0) {
+ if (base::IsValueInRangeForNumericType<uint64_t>(input)) {
WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
return;
}
@@ -270,11 +247,7 @@ void float32_to_uint64_sat_wrapper(Address data) {
void float64_to_int64_sat_wrapper(Address data) {
double input = ReadUnalignedValue<double>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
- if (input < static_cast<double>(std::numeric_limits<int64_t>::max()) &&
- input >= static_cast<double>(std::numeric_limits<int64_t>::min())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(input)) {
WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
return;
}
@@ -291,11 +264,7 @@ void float64_to_int64_sat_wrapper(Address data) {
void float64_to_uint64_sat_wrapper(Address data) {
double input = ReadUnalignedValue<double>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
- if (input < static_cast<double>(std::numeric_limits<uint64_t>::max()) &&
- input >= 0.0) {
+ if (base::IsValueInRangeForNumericType<uint64_t>(input)) {
WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
return;
}
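
Editor's note: the removed comments describe a real rounding pitfall; the sketch below shows roughly the explicit check that base::IsValueInRangeForNumericType now encapsulates for the float-to-int64 case, written against the standard library only.

// Illustrative only, not part of the patch.
#include <cstdint>
#include <limits>

bool FitsInInt64(float input) {
  // INT64_MAX (2^63 - 1) is not representable as a float; the nearest float
  // is 2^63, so comparing with "<=" against the cast max would wrongly accept
  // 2^63. "<" keeps exactly the convertible range. The lower bound, -2^63, is
  // exactly representable, so ">=" is correct there. NaN fails both tests.
  constexpr float kLowerBound =
      static_cast<float>(std::numeric_limits<int64_t>::min());
  constexpr float kUpperBound =
      static_cast<float>(std::numeric_limits<int64_t>::max());  // rounds to 2^63
  return input >= kLowerBound && input < kUpperBound;
}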
@@ -405,9 +374,12 @@ template <typename T, T (*float_round_op)(T)>
void simd_float_round_wrapper(Address data) {
constexpr int n = kSimd128Size / sizeof(T);
for (int i = 0; i < n; i++) {
- WriteUnalignedValue<T>(
- data + (i * sizeof(T)),
- float_round_op(ReadUnalignedValue<T>(data + (i * sizeof(T)))));
+ T input = ReadUnalignedValue<T>(data + (i * sizeof(T)));
+ T value = float_round_op(input);
+#if V8_OS_AIX
+ value = FpOpWorkaround<T>(input, value);
+#endif
+ WriteUnalignedValue<T>(data + (i * sizeof(T)), value);
}
}
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 16f4c5d3f9..4edd23eecf 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -10,25 +10,31 @@
#include "src/api/api-inl.h"
#include "src/api/api-natives.h"
#include "src/ast/ast.h"
+#include "src/base/logging.h"
#include "src/base/overflowing-math.h"
#include "src/common/assert-scope.h"
#include "src/execution/execution.h"
+#include "src/execution/frames-inl.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/init/v8.h"
+#include "src/objects/js-collection-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/templates.h"
#include "src/parsing/parse-info.h"
#include "src/tasks/task-utils.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/function-compiler.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-serialization.h"
+#include "src/wasm/wasm-value.h"
using v8::internal::wasm::ErrorThrower;
using v8::internal::wasm::ScheduledErrorThrower;
@@ -102,7 +108,7 @@ WasmStreaming::WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl)
WasmStreaming::~WasmStreaming() = default;
void WasmStreaming::OnBytesReceived(const uint8_t* bytes, size_t size) {
- TRACE_EVENT1("v8.wasm", "wasm.OnBytesReceived", "num_bytes", size);
+ TRACE_EVENT1("v8.wasm", "wasm.OnBytesReceived", "bytes", size);
impl_->OnBytesReceived(bytes, size);
}
@@ -1581,7 +1587,7 @@ constexpr const char* kName_WasmTableObject = "WebAssembly.Table";
}
void WebAssemblyInstanceGetExports(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
@@ -2020,9 +2026,9 @@ Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
}
Handle<JSFunction> InstallConstructorFunc(Isolate* isolate,
- Handle<JSObject> object,
- const char* str,
- FunctionCallback func) {
+ Handle<JSObject> object,
+ const char* str,
+ FunctionCallback func) {
return InstallFunc(isolate, object, str, func, 1, true, DONT_ENUM);
}
@@ -2281,6 +2287,775 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
runtime_error, DONT_ENUM);
}
+namespace {
+void SetMapValue(Isolate* isolate, Handle<JSMap> map, Handle<Object> key,
+ Handle<Object> value) {
+ DCHECK(!map.is_null() && !key.is_null() && !value.is_null());
+ Handle<Object> argv[] = {key, value};
+ Execution::CallBuiltin(isolate, isolate->map_set(), map, arraysize(argv),
+ argv)
+ .Check();
+}
+
+Handle<Object> GetMapValue(Isolate* isolate, Handle<JSMap> map,
+ Handle<Object> key) {
+ DCHECK(!map.is_null() && !key.is_null());
+ Handle<Object> argv[] = {key};
+ return Execution::CallBuiltin(isolate, isolate->map_get(), map,
+ arraysize(argv), argv)
+ .ToHandleChecked();
+}
+
+// Look up a name in a name table. Name tables are stored under the "names"
+// property of the handler and map names to index.
+base::Optional<int> ResolveValueSelector(Isolate* isolate,
+ Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ size_t index = 0;
+ if (enable_index_lookup && property->AsIntegerIndex(&index) &&
+ index < kMaxInt) {
+ return static_cast<int>(index);
+ }
+
+ Handle<Object> name_table =
+ JSObject::GetProperty(isolate, handler, "names").ToHandleChecked();
+ DCHECK(name_table->IsJSMap());
+
+ Handle<Object> object =
+ GetMapValue(isolate, Handle<JSMap>::cast(name_table), property);
+ if (object->IsUndefined()) return {};
+ DCHECK(object->IsNumeric());
+ return NumberToInt32(*object);
+}
+
+// Helper for unpacking a maybe name: if the name is empty, a default built
+// from the prefix and index is used; a non-empty name is prefixed with a $.
+Handle<String> GetNameOrDefault(Isolate* isolate,
+ MaybeHandle<String> maybe_name,
+ const char* default_name_prefix, int index) {
+ Handle<String> name;
+ if (maybe_name.ToHandle(&name)) {
+ return isolate->factory()
+ ->NewConsString(isolate->factory()->NewStringFromAsciiChecked("$"),
+ name)
+ .ToHandleChecked();
+ }
+
+ // Maximum length of the default names: $memory-2147483648\0
+ static constexpr int kMaxStrLen = 19;
+ EmbeddedVector<char, kMaxStrLen> value;
+ DCHECK_LT(strlen(default_name_prefix) + /*strlen(kMinInt)*/ 11, kMaxStrLen);
+ int len = SNPrintF(value, "%s%d", default_name_prefix, index);
+ return isolate->factory()->InternalizeString(value.SubVector(0, len));
+}
+
+// Generate names for the locals. Names either come from the name table or
+// default to $varX.
+std::vector<Handle<String>> GetLocalNames(Handle<WasmInstanceObject> instance,
+ Address pc) {
+ wasm::NativeModule* native_module = instance->module_object().native_module();
+ wasm::DebugInfo* debug_info = native_module->GetDebugInfo();
+ int num_locals = debug_info->GetNumLocals(pc);
+ auto* isolate = instance->GetIsolate();
+
+ wasm::ModuleWireBytes module_wire_bytes(
+ instance->module_object().native_module()->wire_bytes());
+ const wasm::WasmFunction& function = debug_info->GetFunctionAtAddress(pc);
+
+ std::vector<Handle<String>> names;
+ for (int i = 0; i < num_locals; ++i) {
+ wasm::WireBytesRef local_name_ref =
+ debug_info->GetLocalName(function.func_index, i);
+ DCHECK(module_wire_bytes.BoundsCheck(local_name_ref));
+ Vector<const char> name_vec =
+ module_wire_bytes.GetNameOrNull(local_name_ref);
+ names.emplace_back(GetNameOrDefault(
+ isolate,
+ name_vec.empty() ? MaybeHandle<String>()
+ : isolate->factory()->NewStringFromUtf8(name_vec),
+ "$var", i));
+ }
+
+ return names;
+}
+
+// Generate names for the globals. Names either come from the name table or
+// default to $globalX.
+std::vector<Handle<String>> GetGlobalNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ auto& globals = instance->module()->globals;
+ std::vector<Handle<String>> names;
+ for (uint32_t i = 0; i < globals.size(); ++i) {
+ names.emplace_back(GetNameOrDefault(
+ isolate, WasmInstanceObject::GetGlobalNameOrNull(isolate, instance, i),
+ "$global", i));
+ }
+ return names;
+}
+
+// Generate names for the functions.
+std::vector<Handle<String>> GetFunctionNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ auto* module = instance->module();
+
+ wasm::ModuleWireBytes wire_bytes(
+ instance->module_object().native_module()->wire_bytes());
+
+ std::vector<Handle<String>> names;
+ for (auto& function : module->functions) {
+ DCHECK_EQ(function.func_index, names.size());
+ wasm::WireBytesRef name_ref =
+ module->lazily_generated_names.LookupFunctionName(
+ wire_bytes, function.func_index, VectorOf(module->export_table));
+ DCHECK(wire_bytes.BoundsCheck(name_ref));
+ Vector<const char> name_vec = wire_bytes.GetNameOrNull(name_ref);
+ names.emplace_back(GetNameOrDefault(
+ isolate,
+ name_vec.empty() ? MaybeHandle<String>()
+ : isolate->factory()->NewStringFromUtf8(name_vec),
+ "$func", function.func_index));
+ }
+
+ return names;
+}
+
+// Generate names for the imports.
+std::vector<Handle<String>> GetImportNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ const wasm::WasmModule* module = instance->module();
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ int num_imports = static_cast<int>(module->import_table.size());
+
+ std::vector<Handle<String>> names;
+ for (int index = 0; index < num_imports; ++index) {
+ const wasm::WasmImport& import = module->import_table[index];
+
+ names.emplace_back(WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate, module_object, import.field_name, kInternalize));
+ }
+
+ return names;
+}
+
+// Generate names for the memories.
+std::vector<Handle<String>> GetMemoryNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+
+ std::vector<Handle<String>> names;
+ uint32_t memory_count = instance->has_memory_object() ? 1 : 0;
+ for (uint32_t memory_index = 0; memory_index < memory_count; ++memory_index) {
+ names.emplace_back(GetNameOrDefault(isolate,
+ WasmInstanceObject::GetMemoryNameOrNull(
+ isolate, instance, memory_index),
+ "$memory", memory_index));
+ }
+
+ return names;
+}
+
+// Generate names for the tables.
+std::vector<Handle<String>> GetTableNames(Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ auto tables = handle(instance->tables(), isolate);
+
+ std::vector<Handle<String>> names;
+ for (int table_index = 0; table_index < tables->length(); ++table_index) {
+ auto func_table =
+ handle(WasmTableObject::cast(tables->get(table_index)), isolate);
+ if (!func_table->type().is_reference_to(wasm::HeapType::kFunc)) continue;
+
+ names.emplace_back(GetNameOrDefault(
+ isolate,
+ WasmInstanceObject::GetTableNameOrNull(isolate, instance, table_index),
+ "$table", table_index));
+ }
+ return names;
+}
+
+// Generate names for the exports.
+std::vector<Handle<String>> GetExportNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ const wasm::WasmModule* module = instance->module();
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ int num_exports = static_cast<int>(module->export_table.size());
+
+ std::vector<Handle<String>> names;
+
+ for (int index = 0; index < num_exports; ++index) {
+ const wasm::WasmExport& exp = module->export_table[index];
+
+ names.emplace_back(WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate, module_object, exp.name, kInternalize));
+ }
+ return names;
+}
+
+Handle<WasmInstanceObject> GetInstance(Isolate* isolate,
+ Handle<JSObject> handler) {
+ Handle<Object> instance =
+ JSObject::GetProperty(isolate, handler, "instance").ToHandleChecked();
+ DCHECK(instance->IsWasmInstanceObject());
+ return Handle<WasmInstanceObject>::cast(instance);
+}
+
+Address GetPC(Isolate* isolate, Handle<JSObject> handler) {
+ Handle<Object> pc =
+ JSObject::GetProperty(isolate, handler, "pc").ToHandleChecked();
+ DCHECK(pc->IsBigInt());
+ return Handle<BigInt>::cast(pc)->AsUint64();
+}
+
+Address GetFP(Isolate* isolate, Handle<JSObject> handler) {
+ Handle<Object> fp =
+ JSObject::GetProperty(isolate, handler, "fp").ToHandleChecked();
+ DCHECK(fp->IsBigInt());
+ return Handle<BigInt>::cast(fp)->AsUint64();
+}
+
+Address GetCalleeFP(Isolate* isolate, Handle<JSObject> handler) {
+ Handle<Object> callee_fp =
+ JSObject::GetProperty(isolate, handler, "callee_fp").ToHandleChecked();
+ DCHECK(callee_fp->IsBigInt());
+ return Handle<BigInt>::cast(callee_fp)->AsUint64();
+}
+
+// Convert a WasmValue to an appropriate JS representation.
+static Handle<Object> WasmValueToObject(Isolate* isolate,
+ wasm::WasmValue value) {
+ auto* factory = isolate->factory();
+ switch (value.type().kind()) {
+ case wasm::ValueType::kI32:
+ return factory->NewNumberFromInt(value.to_i32());
+ case wasm::ValueType::kI64:
+ return BigInt::FromInt64(isolate, value.to_i64());
+ case wasm::ValueType::kF32:
+ return factory->NewNumber(value.to_f32());
+ case wasm::ValueType::kF64:
+ return factory->NewNumber(value.to_f64());
+ case wasm::ValueType::kS128: {
+ wasm::Simd128 s128 = value.to_s128();
+ Handle<JSArrayBuffer> buffer;
+ if (!isolate->factory()
+ ->NewJSArrayBufferAndBackingStore(
+ kSimd128Size, InitializedFlag::kUninitialized)
+ .ToHandle(&buffer)) {
+ isolate->FatalProcessOutOfHeapMemory(
+ "failed to allocate backing store");
+ }
+
+ memcpy(buffer->allocation_base(), s128.bytes(), buffer->byte_length());
+ return isolate->factory()->NewJSTypedArray(kExternalUint8Array, buffer, 0,
+ buffer->byte_length());
+ }
+ case wasm::ValueType::kRef:
+ return value.to_externref();
+ default:
+ break;
+ }
+ return factory->undefined_value();
+}
+
+base::Optional<int> HasLocalImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+ Address pc = GetPC(isolate, handler);
+
+ wasm::DebugInfo* debug_info =
+ instance->module_object().native_module()->GetDebugInfo();
+ int num_locals = debug_info->GetNumLocals(pc);
+ if (0 <= index && index < num_locals) return index;
+ return {};
+}
+
+Handle<Object> GetLocalImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Factory* factory = isolate->factory();
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+
+ base::Optional<int> index =
+ HasLocalImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return factory->undefined_value();
+ Address pc = GetPC(isolate, handler);
+ Address fp = GetFP(isolate, handler);
+ Address callee_fp = GetCalleeFP(isolate, handler);
+
+ wasm::DebugInfo* debug_info =
+ instance->module_object().native_module()->GetDebugInfo();
+ wasm::WasmValue value = debug_info->GetLocalValue(*index, pc, fp, callee_fp);
+ return WasmValueToObject(isolate, value);
+}
+
+base::Optional<int> HasGlobalImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+
+ const std::vector<wasm::WasmGlobal>& globals = instance->module()->globals;
+ if (globals.size() <= kMaxInt && 0 <= *index &&
+ *index < static_cast<int>(globals.size())) {
+ return index;
+ }
+ return {};
+}
+
+Handle<Object> GetGlobalImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasGlobalImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ const std::vector<wasm::WasmGlobal>& globals = instance->module()->globals;
+ return WasmValueToObject(
+ isolate, WasmInstanceObject::GetGlobalValue(instance, globals[*index]));
+}
+
+base::Optional<int> HasMemoryImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (index && *index == 0 && instance->has_memory_object()) return index;
+ return {};
+}
+
+Handle<Object> GetMemoryImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasMemoryImpl(isolate, property, handler, enable_index_lookup);
+ if (index) return handle(instance->memory_object(), isolate);
+ return isolate->factory()->undefined_value();
+}
+
+base::Optional<int> HasFunctionImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+ const std::vector<wasm::WasmFunction>& functions =
+ instance->module()->functions;
+ if (functions.size() <= kMaxInt && 0 <= *index &&
+ *index < static_cast<int>(functions.size())) {
+ return index;
+ }
+ return {};
+}
+
+Handle<Object> GetFunctionImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasFunctionImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ return WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate, instance,
+ *index);
+}
+
+base::Optional<int> HasTableImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+ Handle<FixedArray> tables(instance->tables(), isolate);
+ int num_tables = tables->length();
+ if (*index < 0 || *index >= num_tables) return {};
+
+ Handle<WasmTableObject> func_table(WasmTableObject::cast(tables->get(*index)),
+ isolate);
+ if (func_table->type().is_reference_to(wasm::HeapType::kFunc)) return index;
+ return {};
+}
+
+Handle<Object> GetTableImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasTableImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ Handle<WasmTableObject> func_table(
+ WasmTableObject::cast(instance->tables().get(*index)), isolate);
+ return func_table;
+}
+
+base::Optional<int> HasImportImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+ const wasm::WasmModule* module = instance->module();
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ int num_imports = static_cast<int>(module->import_table.size());
+ if (0 <= *index && *index < num_imports) return index;
+ return {};
+}
+
+Handle<JSObject> GetExternalObject(Isolate* isolate,
+ wasm::ImportExportKindCode kind,
+ uint32_t index) {
+ Handle<JSObject> result = isolate->factory()->NewJSObjectWithNullProto();
+ Handle<Object> value = isolate->factory()->NewNumberFromUint(index);
+ switch (kind) {
+ case wasm::kExternalFunction:
+ JSObject::AddProperty(isolate, result, "func", value, NONE);
+ break;
+ case wasm::kExternalGlobal:
+ JSObject::AddProperty(isolate, result, "global", value, NONE);
+ break;
+ case wasm::kExternalTable:
+ JSObject::AddProperty(isolate, result, "table", value, NONE);
+ break;
+ case wasm::kExternalMemory:
+ JSObject::AddProperty(isolate, result, "mem", value, NONE);
+ break;
+ case wasm::kExternalException:
+ JSObject::AddProperty(isolate, result, "exn", value, NONE);
+ break;
+ }
+ return result;
+}
+
+Handle<Object> GetImportImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasImportImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ const wasm::WasmImport& imp = instance->module()->import_table[*index];
+ return GetExternalObject(isolate, imp.kind, imp.index);
+}
+
+base::Optional<int> HasExportImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+
+ const wasm::WasmModule* module = instance->module();
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ int num_exports = static_cast<int>(module->export_table.size());
+ if (0 <= *index && *index < num_exports) return index;
+ return {};
+}
+
+Handle<Object> GetExportImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasExportImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ const wasm::WasmExport& exp = instance->module()->export_table[*index];
+ return GetExternalObject(isolate, exp.kind, exp.index);
+}
+
+// Generic has trap callback for the index space proxies.
+template <base::Optional<int> Impl(Isolate*, Handle<Name>, Handle<JSObject>,
+ bool)>
+void HasTrapCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_GE(args.Length(), 2);
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
+ DCHECK(args.This()->IsObject());
+ Handle<JSObject> handler =
+ Handle<JSObject>::cast(Utils::OpenHandle(*args.This()));
+
+ DCHECK(args[1]->IsName());
+ Handle<Name> property = Handle<Name>::cast(Utils::OpenHandle(*args[1]));
+ args.GetReturnValue().Set(Impl(isolate, property, handler, true).has_value());
+}
+
+// Generic get trap callback for the index space proxies.
+template <Handle<Object> Impl(Isolate*, Handle<Name>, Handle<JSObject>, bool)>
+void GetTrapCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_GE(args.Length(), 2);
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
+ DCHECK(args.This()->IsObject());
+ Handle<JSObject> handler =
+ Handle<JSObject>::cast(Utils::OpenHandle(*args.This()));
+
+ DCHECK(args[1]->IsName());
+ Handle<Name> property = Handle<Name>::cast(Utils::OpenHandle(*args[1]));
+ args.GetReturnValue().Set(
+ Utils::ToLocal(Impl(isolate, property, handler, true)));
+}
+
+template <typename ReturnT>
+ReturnT DelegateToplevelCall(Isolate* isolate, Handle<JSObject> target,
+ Handle<Name> property, const char* index_space,
+ ReturnT (*impl)(Isolate*, Handle<Name>,
+ Handle<JSObject>, bool)) {
+ Handle<Object> namespace_proxy =
+ JSObject::GetProperty(isolate, target, index_space).ToHandleChecked();
+ DCHECK(namespace_proxy->IsJSProxy());
+ Handle<JSObject> namespace_handler(
+ JSObject::cast(Handle<JSProxy>::cast(namespace_proxy)->handler()),
+ isolate);
+ return impl(isolate, property, namespace_handler, false);
+}
+
+template <typename ReturnT>
+using DelegateCallback = ReturnT (*)(Isolate*, Handle<Name>, Handle<JSObject>,
+ bool);
+
+// Has trap callback for the top-level proxy.
+void ToplevelHasTrapCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_GE(args.Length(), 2);
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
+ DCHECK(args[0]->IsObject());
+ Handle<JSObject> target = Handle<JSObject>::cast(Utils::OpenHandle(*args[0]));
+
+ DCHECK(args[1]->IsName());
+ Handle<Name> property = Handle<Name>::cast(Utils::OpenHandle(*args[1]));
+
+ // First check if the property exists on the target.
+ if (JSObject::HasProperty(target, property).FromMaybe(false)) {
+ args.GetReturnValue().Set(true);
+ return;
+ }
+
+  // Now check, in order, whether one of the index space proxies knows the
+  // property.
+ constexpr std::pair<const char*, DelegateCallback<base::Optional<int>>>
+ kDelegates[] = {{"memories", HasMemoryImpl},
+ {"locals", HasLocalImpl},
+ {"tables", HasTableImpl},
+ {"functions", HasFunctionImpl},
+ {"globals", HasGlobalImpl}};
+ for (auto& delegate : kDelegates) {
+ if (DelegateToplevelCall(isolate, target, property, delegate.first,
+ delegate.second)) {
+ args.GetReturnValue().Set(true);
+ return;
+ }
+ args.GetReturnValue().Set(false);
+ }
+}
+
+// Get trap callback for the top-level proxy.
+void ToplevelGetTrapCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_GE(args.Length(), 2);
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
+ DCHECK(args[0]->IsObject());
+ Handle<JSObject> target = Handle<JSObject>::cast(Utils::OpenHandle(*args[0]));
+
+ DCHECK(args[1]->IsName());
+ Handle<Name> property = Handle<Name>::cast(Utils::OpenHandle(*args[1]));
+
+ // First, check if the property is a proper property on the target. If so,
+ // return its value.
+ Handle<Object> value =
+ JSObject::GetProperty(isolate, target, property).ToHandleChecked();
+ if (!value->IsUndefined()) {
+ args.GetReturnValue().Set(Utils::ToLocal(value));
+ return;
+ }
+
+ // Try the index space proxies in the correct disambiguation order.
+ constexpr std::pair<const char*, DelegateCallback<Handle<Object>>>
+ kDelegates[] = {{"memories", GetMemoryImpl},
+ {"locals", GetLocalImpl},
+ {"tables", GetTableImpl},
+ {"functions", GetFunctionImpl},
+ {"globals", GetGlobalImpl}};
+ for (auto& delegate : kDelegates) {
+ value = DelegateToplevelCall(isolate, target, property, delegate.first,
+ delegate.second);
+ if (!value->IsUndefined()) {
+ args.GetReturnValue().Set(Utils::ToLocal(value));
+ return;
+ }
+ }
+}
+
+// Populate a JSMap with name->index mappings from an ordered list of names.
+Handle<JSMap> GetNameTable(Isolate* isolate,
+ const std::vector<Handle<String>>& names) {
+ Factory* factory = isolate->factory();
+ Handle<JSMap> name_table = factory->NewJSMap();
+
+ for (size_t i = 0; i < names.size(); ++i) {
+ SetMapValue(isolate, name_table, names[i], factory->NewNumberFromInt64(i));
+ }
+ return name_table;
+}
+
+// Produce a JSProxy with a given name table and "get"/"has" trap handlers.
+Handle<JSProxy> GetJSProxy(
+ WasmFrame* frame, Handle<JSMap> name_table,
+ void (*get_callback)(const v8::FunctionCallbackInfo<v8::Value>&),
+ void (*has_callback)(const v8::FunctionCallbackInfo<v8::Value>&)) {
+ Isolate* isolate = frame->isolate();
+ Factory* factory = isolate->factory();
+ Handle<JSObject> target = factory->NewJSObjectWithNullProto();
+ Handle<JSObject> handler = factory->NewJSObjectWithNullProto();
+
+ // Besides the name table, the get and has traps need access to the instance
+ // and frame information.
+ JSObject::AddProperty(isolate, handler, "names", name_table, DONT_ENUM);
+ Handle<WasmInstanceObject> instance(frame->wasm_instance(), isolate);
+ JSObject::AddProperty(isolate, handler, "instance", instance, DONT_ENUM);
+ Handle<BigInt> pc = BigInt::FromInt64(isolate, frame->pc());
+ JSObject::AddProperty(isolate, handler, "pc", pc, DONT_ENUM);
+ Handle<BigInt> fp = BigInt::FromInt64(isolate, frame->fp());
+ JSObject::AddProperty(isolate, handler, "fp", fp, DONT_ENUM);
+ Handle<BigInt> callee_fp = BigInt::FromInt64(isolate, frame->callee_fp());
+ JSObject::AddProperty(isolate, handler, "callee_fp", callee_fp, DONT_ENUM);
+
+ InstallFunc(isolate, handler, "get", get_callback, 3, false, READ_ONLY);
+ InstallFunc(isolate, handler, "has", has_callback, 2, false, READ_ONLY);
+
+ return factory->NewJSProxy(target, handler);
+}
+
+Handle<JSObject> GetStackObject(WasmFrame* frame) {
+ Isolate* isolate = frame->isolate();
+ Handle<JSObject> object = isolate->factory()->NewJSObjectWithNullProto();
+ wasm::DebugInfo* debug_info =
+ frame->wasm_instance().module_object().native_module()->GetDebugInfo();
+ int num_values = debug_info->GetStackDepth(frame->pc());
+ for (int i = 0; i < num_values; ++i) {
+ wasm::WasmValue value = debug_info->GetStackValue(
+ i, frame->pc(), frame->fp(), frame->callee_fp());
+ JSObject::AddDataElement(object, i, WasmValueToObject(isolate, value),
+ NONE);
+ }
+ return object;
+}
+} // namespace
+
+// This function generates the JS debug proxy for a given Wasm frame. The debug
+// proxy is used when evaluating debug JS expressions on a wasm frame and lets
+// the developer inspect the engine state from JS. The proxy provides the
+// following interface:
+//
+// type WasmSimdValue = Uint8Array;
+// type WasmValue = number | bigint | object | WasmSimdValue;
+// type WasmFunction = (...args : WasmValue[]) => WasmValue;
+// type WasmExport = {name : string} & ({func : number} | {table : number} |
+// {mem : number} | {global : number});
+// type WasmImport = {name : string, module : string} &
+// ({func : number} | {table : number} | {mem : number} |
+// {global : number});
+// interface WasmInterface {
+// $globalX: WasmValue;
+// $varX: WasmValue;
+// $funcX(a : WasmValue /*, ...*/) : WasmValue;
+// readonly $memoryX : WebAssembly.Memory;
+// readonly $tableX : WebAssembly.Table;
+// readonly memories : {[nameOrIndex:string | number] : WebAssembly.Memory};
+// readonly tables : {[nameOrIndex:string | number] : WebAssembly.Table};
+// readonly stack : WasmValue[];
+// readonly imports : {[nameOrIndex:string | number] : WasmImport};
+// readonly exports : {[nameOrIndex:string | number] : WasmExport};
+// readonly globals : {[nameOrIndex:string | number] : WasmValue};
+// readonly locals : {[nameOrIndex:string | number] : WasmValue};
+// readonly functions : {[nameOrIndex:string | number] : WasmFunction};
+// }
+//
+// The wasm index spaces (memories, tables, imports, exports, globals, locals,
+// and functions) are JSProxies that lazily produce values either by index or
+// by name. A top-level JSProxy is wrapped around those for top-level lookup
+// of names in the disambiguation order memory, local, table, function,
+// global. Import and export names are not globally resolved.
+
+Handle<JSProxy> WasmJs::GetJSDebugProxy(WasmFrame* frame) {
+ Isolate* isolate = frame->isolate();
+ Factory* factory = isolate->factory();
+ Handle<WasmInstanceObject> instance(frame->wasm_instance(), isolate);
+
+ // The top level proxy delegates lookups to the index space proxies.
+ Handle<JSObject> handler = factory->NewJSObjectWithNullProto();
+ InstallFunc(isolate, handler, "get", ToplevelGetTrapCallback, 3, false,
+ READ_ONLY);
+ InstallFunc(isolate, handler, "has", ToplevelHasTrapCallback, 2, false,
+ READ_ONLY);
+
+ Handle<JSObject> target = factory->NewJSObjectWithNullProto();
+
+ // Generate JSMaps per index space for name->index lookup. Every index space
+ // proxy is associated with its table for local name lookup.
+
+ auto local_name_table =
+ GetNameTable(isolate, GetLocalNames(instance, frame->pc()));
+ auto locals =
+ GetJSProxy(frame, local_name_table, GetTrapCallback<GetLocalImpl>,
+ HasTrapCallback<HasLocalImpl>);
+ JSObject::AddProperty(isolate, target, "locals", locals, READ_ONLY);
+
+ auto global_name_table = GetNameTable(isolate, GetGlobalNames(instance));
+ auto globals =
+ GetJSProxy(frame, global_name_table, GetTrapCallback<GetGlobalImpl>,
+ HasTrapCallback<HasGlobalImpl>);
+ JSObject::AddProperty(isolate, target, "globals", globals, READ_ONLY);
+
+ auto function_name_table = GetNameTable(isolate, GetFunctionNames(instance));
+ auto functions =
+ GetJSProxy(frame, function_name_table, GetTrapCallback<GetFunctionImpl>,
+ HasTrapCallback<HasFunctionImpl>);
+ JSObject::AddProperty(isolate, target, "functions", functions, READ_ONLY);
+
+ auto memory_name_table = GetNameTable(isolate, GetMemoryNames(instance));
+ auto memories =
+ GetJSProxy(frame, memory_name_table, GetTrapCallback<GetMemoryImpl>,
+ HasTrapCallback<HasMemoryImpl>);
+ JSObject::AddProperty(isolate, target, "memories", memories, READ_ONLY);
+
+ auto table_name_table = GetNameTable(isolate, GetTableNames(instance));
+ auto tables =
+ GetJSProxy(frame, table_name_table, GetTrapCallback<GetTableImpl>,
+ HasTrapCallback<HasTableImpl>);
+ JSObject::AddProperty(isolate, target, "tables", tables, READ_ONLY);
+
+ auto import_name_table = GetNameTable(isolate, GetImportNames(instance));
+ auto imports =
+ GetJSProxy(frame, import_name_table, GetTrapCallback<GetImportImpl>,
+ HasTrapCallback<HasImportImpl>);
+ JSObject::AddProperty(isolate, target, "imports", imports, READ_ONLY);
+
+ auto export_name_table = GetNameTable(isolate, GetExportNames(instance));
+ auto exports =
+ GetJSProxy(frame, export_name_table, GetTrapCallback<GetExportImpl>,
+ HasTrapCallback<HasExportImpl>);
+ JSObject::AddProperty(isolate, target, "exports", exports, READ_ONLY);
+
+ auto stack = GetStackObject(frame);
+ JSObject::AddProperty(isolate, target, "stack", stack, READ_ONLY);
+
+ return factory->NewJSProxy(target, handler);
+}
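For readers who want to see the disambiguation order in isolation, the following stand-alone C++ sketch (not part of this patch; the containers and names are invented for illustration) resolves a name against ordered index spaces the same way the top-level proxy above delegates to memories, locals, tables, functions and globals:

// Illustrative only: models the top-level name lookup order with std:: types.
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <utility>
#include <vector>

using IndexSpace = std::map<std::string, int>;  // name -> index within a space

// Returns {space name, index} for the first space that knows the name.
std::optional<std::pair<std::string, int>> Resolve(
    const std::string& name,
    const std::vector<std::pair<std::string, IndexSpace>>& spaces) {
  for (const auto& entry : spaces) {
    auto it = entry.second.find(name);
    if (it != entry.second.end()) {
      return std::make_pair(entry.first, it->second);
    }
  }
  return std::nullopt;
}

int main() {
  const std::vector<std::pair<std::string, IndexSpace>> spaces = {
      {"memories", {{"$mem0", 0}}},
      {"locals", {{"$var0", 0}, {"$var1", 1}}},
      {"tables", {{"$tab0", 0}}},
      {"functions", {{"$main", 0}}},
      {"globals", {{"$g0", 0}}}};
  if (auto hit = Resolve("$var1", spaces)) {
    std::cout << hit->first << "[" << hit->second << "]\n";  // prints locals[1]
  }
  return 0;
}

In the patch itself the same first-hit-wins walk is what DelegateToplevelCall performs over the handler objects of the per-index-space proxies.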
+
#undef ASSIGN
#undef EXTRACT_THIS
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index 6f83ad6326..4c9ae9645b 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -9,6 +9,8 @@
namespace v8 {
namespace internal {
+class JSProxy;
+class WasmFrame;
namespace wasm {
class StreamingDecoder;
@@ -19,6 +21,8 @@ class WasmJs {
public:
V8_EXPORT_PRIVATE static void Install(Isolate* isolate,
bool exposed_on_global_object);
+
+ V8_EXPORT_PRIVATE static Handle<JSProxy> GetJSDebugProxy(WasmFrame* frame);
};
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index de895e6429..f2e7d63f52 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -597,7 +597,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
size_t start = EmitSection(kTableSectionCode, buffer);
buffer->write_size(tables_.size());
for (const WasmTable& table : tables_) {
- buffer->write_u8(table.type.value_type_code());
+ WriteValueType(buffer, table.type);
buffer->write_u8(table.has_maximum ? kWithMaximum : kNoMaximum);
buffer->write_size(table.min_size);
if (table.has_maximum) buffer->write_size(table.max_size);
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index fdd64950df..7d6df375aa 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -236,6 +236,8 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
public:
explicit WasmModuleBuilder(Zone* zone);
+ WasmModuleBuilder(const WasmModuleBuilder&) = delete;
+ WasmModuleBuilder& operator=(const WasmModuleBuilder&) = delete;
// Building methods.
uint32_t AddImport(Vector<const char> name, FunctionSig* sig,
@@ -361,8 +363,6 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
// Indirect functions must be allocated before adding extra tables.
bool allocating_indirect_functions_allowed_ = true;
#endif
-
- DISALLOW_COPY_AND_ASSIGN(WasmModuleBuilder);
};
inline FunctionSig* WasmFunctionBuilder::signature() {
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 3d935f27be..afe192a3d3 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -137,7 +137,7 @@ void LazilyGeneratedNames::AddForTesting(int function_index,
AsmJsOffsetInformation::AsmJsOffsetInformation(
Vector<const byte> encoded_offsets)
- : encoded_offsets_(OwnedVector<uint8_t>::Of(encoded_offsets)) {}
+ : encoded_offsets_(OwnedVector<const uint8_t>::Of(encoded_offsets)) {}
AsmJsOffsetInformation::~AsmJsOffsetInformation() = default;
@@ -618,10 +618,11 @@ size_t EstimateStoredSize(const WasmModule* module) {
(module->signature_zone ? module->signature_zone->allocation_size()
: 0) +
VectorSize(module->types) + VectorSize(module->type_kinds) +
- VectorSize(module->signature_ids) + VectorSize(module->functions) +
- VectorSize(module->data_segments) + VectorSize(module->tables) +
- VectorSize(module->import_table) + VectorSize(module->export_table) +
- VectorSize(module->exceptions) + VectorSize(module->elem_segments);
+ VectorSize(module->canonicalized_type_ids) +
+ VectorSize(module->functions) + VectorSize(module->data_segments) +
+ VectorSize(module->tables) + VectorSize(module->import_table) +
+ VectorSize(module->export_table) + VectorSize(module->exceptions) +
+ VectorSize(module->elem_segments);
}
size_t PrintSignature(Vector<char> buffer, const wasm::FunctionSig* sig,
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 2ffc92e390..9c54f17b9c 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -266,6 +266,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t maximum_pages = 0; // maximum size of the memory in 64k pages
bool has_shared_memory = false; // true if memory is a SharedArrayBuffer
bool has_maximum_pages = false; // true if there is a maximum memory size
+ bool is_memory64 = false; // true if the memory is 64 bit
bool has_memory = false; // true if the memory was defined or imported
bool mem_export = false; // true if the memory is exported
int start_function_index = -1; // start function, >= 0 if any
@@ -283,9 +284,12 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t num_declared_data_segments = 0; // From the DataCount section.
WireBytesRef code = {0, 0};
WireBytesRef name = {0, 0};
- std::vector<TypeDefinition> types; // by type index
- std::vector<uint8_t> type_kinds; // by type index
- std::vector<uint32_t> signature_ids; // by signature index
+ std::vector<TypeDefinition> types; // by type index
+ std::vector<uint8_t> type_kinds; // by type index
+ // Map from each type index to the index of its corresponding canonical type.
+ // Note: right now, only functions are canonicalized, and arrays and structs
+ // map to themselves.
+ std::vector<uint32_t> canonicalized_type_ids;
bool has_type(uint32_t index) const { return index < types.size(); }
@@ -293,37 +297,43 @@ struct V8_EXPORT_PRIVATE WasmModule {
types.push_back(TypeDefinition(sig));
type_kinds.push_back(kWasmFunctionTypeCode);
uint32_t canonical_id = sig ? signature_map.FindOrInsert(*sig) : 0;
- signature_ids.push_back(canonical_id);
- }
- const FunctionSig* signature(uint32_t index) const {
- DCHECK(type_kinds[index] == kWasmFunctionTypeCode);
- return types[index].function_sig;
+ canonicalized_type_ids.push_back(canonical_id);
}
bool has_signature(uint32_t index) const {
return index < types.size() && type_kinds[index] == kWasmFunctionTypeCode;
}
+ const FunctionSig* signature(uint32_t index) const {
+ DCHECK(has_signature(index));
+ return types[index].function_sig;
+ }
+
void add_struct_type(const StructType* type) {
types.push_back(TypeDefinition(type));
type_kinds.push_back(kWasmStructTypeCode);
- }
- const StructType* struct_type(uint32_t index) const {
- DCHECK(type_kinds[index] == kWasmStructTypeCode);
- return types[index].struct_type;
+ // No canonicalization for structs.
+ canonicalized_type_ids.push_back(0);
}
bool has_struct(uint32_t index) const {
return index < types.size() && type_kinds[index] == kWasmStructTypeCode;
}
+ const StructType* struct_type(uint32_t index) const {
+ DCHECK(has_struct(index));
+ return types[index].struct_type;
+ }
+
void add_array_type(const ArrayType* type) {
types.push_back(TypeDefinition(type));
type_kinds.push_back(kWasmArrayTypeCode);
- }
- const ArrayType* array_type(uint32_t index) const {
- DCHECK(type_kinds[index] == kWasmArrayTypeCode);
- return types[index].array_type;
+ // No canonicalization for arrays.
+ canonicalized_type_ids.push_back(0);
}
bool has_array(uint32_t index) const {
return index < types.size() && type_kinds[index] == kWasmArrayTypeCode;
}
+ const ArrayType* array_type(uint32_t index) const {
+ DCHECK(has_array(index));
+ return types[index].array_type;
+ }
std::vector<WasmFunction> functions;
std::vector<WasmDataSegment> data_segments;
@@ -344,9 +354,8 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::unique_ptr<AsmJsOffsetInformation> asm_js_offset_information;
explicit WasmModule(std::unique_ptr<Zone> signature_zone = nullptr);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(WasmModule);
+ WasmModule(const WasmModule&) = delete;
+ WasmModule& operator=(const WasmModule&) = delete;
};
// Static representation of a wasm indirect call table.
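The canonicalized_type_ids vector added above records one canonical id per type index: add_signature obtains it from a find-or-insert lookup, while add_struct_type and add_array_type record 0. As a rough stand-alone sketch of that find-or-insert idea (the string-keyed map below is invented for illustration and is not V8's SignatureMap):

// Illustrative find-or-insert canonicalization over string-encoded signatures.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

class CanonicalIds {
 public:
  // Returns the existing id for `sig`, or assigns the next free one.
  uint32_t FindOrInsert(const std::string& sig) {
    auto it = ids_.find(sig);
    if (it != ids_.end()) return it->second;
    uint32_t id = static_cast<uint32_t>(ids_.size());
    ids_.emplace(sig, id);
    return id;
  }

 private:
  std::map<std::string, uint32_t> ids_;
};

int main() {
  CanonicalIds canon;
  std::vector<uint32_t> canonicalized_type_ids;
  canonicalized_type_ids.push_back(canon.FindOrInsert("i_ii"));  // 0
  canonicalized_type_ids.push_back(canon.FindOrInsert("v_v"));   // 1
  canonicalized_type_ids.push_back(canon.FindOrInsert("i_ii"));  // 0 again
  canonicalized_type_ids.push_back(0);  // struct/array type: not canonicalized
  for (uint32_t id : canonicalized_type_ids) std::cout << id << ' ';
  std::cout << '\n';
  return 0;
}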
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 984c6d0f5b..744a16c855 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -28,6 +28,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/wasm/wasm-objects-tq-inl.inc"
+
OBJECT_CONSTRUCTORS_IMPL(WasmExceptionObject, JSObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmExceptionTag)
OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData, Struct)
@@ -58,7 +60,8 @@ CAST_ACCESSOR(WasmArray)
Object value = TaggedField<Object, offset>::load(isolate, *this); \
return !value.IsUndefined(GetReadOnlyRoots(isolate)); \
} \
- ACCESSORS(holder, name, type, offset)
+ ACCESSORS_CHECKED2(holder, name, type, offset, \
+ !value.IsUndefined(GetReadOnlyRoots(isolate)), true)
#define PRIMITIVE_ACCESSORS(holder, name, type, offset) \
type holder::name() const { \
@@ -329,11 +332,12 @@ ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
SMI_ACCESSORS(WasmExportedFunctionData, jump_table_offset,
kJumpTableOffsetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
+ACCESSORS(WasmExportedFunctionData, signature, Foreign, kSignatureOffset)
+SMI_ACCESSORS(WasmExportedFunctionData, call_count, kCallCountOffset)
ACCESSORS(WasmExportedFunctionData, c_wrapper_code, Object, kCWrapperCodeOffset)
ACCESSORS(WasmExportedFunctionData, wasm_call_target, Object,
kWasmCallTargetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, packed_args_size, kPackedArgsSizeOffset)
-ACCESSORS(WasmExportedFunctionData, signature, Foreign, kSignatureOffset)
// WasmJSFunction
WasmJSFunction::WasmJSFunction(Address ptr) : JSFunction(ptr) {
@@ -352,6 +356,8 @@ ACCESSORS(WasmJSFunctionData, serialized_signature, PodArray<wasm::ValueType>,
kSerializedSignatureOffset)
ACCESSORS(WasmJSFunctionData, callable, JSReceiver, kCallableOffset)
ACCESSORS(WasmJSFunctionData, wrapper_code, Code, kWrapperCodeOffset)
+ACCESSORS(WasmJSFunctionData, wasm_to_js_wrapper_code, Code,
+ kWasmToJsWrapperCodeOffset)
// WasmCapiFunction
WasmCapiFunction::WasmCapiFunction(Address ptr) : JSFunction(ptr) {
@@ -359,16 +365,6 @@ WasmCapiFunction::WasmCapiFunction(Address ptr) : JSFunction(ptr) {
}
CAST_ACCESSOR(WasmCapiFunction)
-// WasmCapiFunctionData
-OBJECT_CONSTRUCTORS_IMPL(WasmCapiFunctionData, Struct)
-CAST_ACCESSOR(WasmCapiFunctionData)
-PRIMITIVE_ACCESSORS(WasmCapiFunctionData, call_target, Address,
- kCallTargetOffset)
-ACCESSORS(WasmCapiFunctionData, embedder_data, Foreign, kEmbedderDataOffset)
-ACCESSORS(WasmCapiFunctionData, wrapper_code, Code, kWrapperCodeOffset)
-ACCESSORS(WasmCapiFunctionData, serialized_signature, PodArray<wasm::ValueType>,
- kSerializedSignatureOffset)
-
// WasmExternalFunction
WasmExternalFunction::WasmExternalFunction(Address ptr) : JSFunction(ptr) {
SLOW_DCHECK(IsWasmExternalFunction(*this));
@@ -451,6 +447,11 @@ int WasmArray::SizeFor(Map map, int length) {
}
void WasmTypeInfo::clear_foreign_address(Isolate* isolate) {
+#ifdef V8_HEAP_SANDBOX
+ // Due to the type-specific pointer tags for external pointers, we need to
+ // allocate an entry in the table here even though it will just store nullptr.
+ AllocateExternalPointerEntries(isolate);
+#endif
set_foreign_address(isolate, 0);
}
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index cf78ab5ff3..d06caef486 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -1508,10 +1508,15 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
callable = resolved.second; // Update to ultimate target.
DCHECK_NE(compiler::WasmImportCallKind::kLinkError, kind);
wasm::CompilationEnv env = native_module->CreateCompilationEnv();
- SharedFunctionInfo shared = js_function->shared();
+  // {expected_arity} is only used if kind == kJSFunctionArityMismatch;
+  // otherwise it stays -1.
+ int expected_arity = -1;
+  if (kind == compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
+ expected_arity = Handle<JSFunction>::cast(callable)
+ ->shared()
+ .internal_formal_parameter_count();
+ }
wasm::WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
- isolate->wasm_engine(), &env, kind, sig, false,
- shared.internal_formal_parameter_count());
+ isolate->wasm_engine(), &env, kind, sig, false, expected_arity);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots,
@@ -1674,6 +1679,7 @@ Handle<WasmExceptionObject> WasmExceptionObject::New(
return exception;
}
+// TODO(9495): Update this if function type variance is introduced.
bool WasmExceptionObject::MatchesSignature(const wasm::FunctionSig* sig) {
DCHECK_EQ(0, sig->return_count());
DCHECK_LE(sig->parameter_count(), std::numeric_limits<int>::max());
@@ -1687,6 +1693,7 @@ bool WasmExceptionObject::MatchesSignature(const wasm::FunctionSig* sig) {
return true;
}
+// TODO(9495): Update this if function type variance is introduced.
bool WasmCapiFunction::MatchesSignature(const wasm::FunctionSig* sig) const {
// TODO(jkummerow): Unify with "SignatureHelper" in c-api.cc.
int param_count = static_cast<int>(sig->parameter_count());
@@ -1833,16 +1840,14 @@ bool WasmCapiFunction::IsWasmCapiFunction(Object object) {
Handle<WasmCapiFunction> WasmCapiFunction::New(
Isolate* isolate, Address call_target, Handle<Foreign> embedder_data,
Handle<PodArray<wasm::ValueType>> serialized_signature) {
- Handle<WasmCapiFunctionData> fun_data =
- Handle<WasmCapiFunctionData>::cast(isolate->factory()->NewStruct(
- WASM_CAPI_FUNCTION_DATA_TYPE, AllocationType::kOld));
- fun_data->set_call_target(call_target);
- fun_data->set_embedder_data(*embedder_data);
- fun_data->set_serialized_signature(*serialized_signature);
// TODO(jkummerow): Install a JavaScript wrapper. For now, calling
// these functions directly is unsupported; they can only be called
// from Wasm code.
- fun_data->set_wrapper_code(isolate->builtins()->builtin(Builtins::kIllegal));
+ Handle<WasmCapiFunctionData> fun_data =
+ isolate->factory()->NewWasmCapiFunctionData(
+ call_target, embedder_data,
+ isolate->builtins()->builtin_handle(Builtins::kIllegal),
+ serialized_signature, AllocationType::kOld);
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfoForWasmCapiFunction(fun_data);
return Handle<WasmCapiFunction>::cast(
@@ -1884,10 +1889,11 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
function_data->set_instance(*instance);
function_data->set_jump_table_offset(jump_table_offset);
function_data->set_function_index(func_index);
+ function_data->set_signature(*sig_foreign);
+ function_data->set_call_count(0);
function_data->set_c_wrapper_code(Smi::zero(), SKIP_WRITE_BARRIER);
function_data->set_wasm_call_target(Smi::zero(), SKIP_WRITE_BARRIER);
function_data->set_packed_args_size(0);
- function_data->set_signature(*sig_foreign);
MaybeHandle<String> maybe_name;
bool is_asm_js_module = instance->module_object().is_asm_js();
@@ -1948,6 +1954,23 @@ const wasm::FunctionSig* WasmExportedFunction::sig() {
return instance().module()->functions[function_index()].sig;
}
+bool WasmExportedFunction::MatchesSignature(
+ const WasmModule* other_module, const wasm::FunctionSig* other_sig) {
+ const wasm::FunctionSig* sig = this->sig();
+ if (sig->parameter_count() != other_sig->parameter_count() ||
+ sig->return_count() != other_sig->return_count()) {
+ return false;
+ }
+
+  for (size_t i = 0; i < sig->all().size(); i++) {
+ if (!wasm::EquivalentTypes(sig->all()[i], other_sig->all()[i],
+ this->instance().module(), other_module)) {
+ return false;
+ }
+ }
+ return true;
+}
+
// static
bool WasmJSFunction::IsWasmJSFunction(Object object) {
if (!object.IsJSFunction()) return false;
@@ -1955,8 +1978,6 @@ bool WasmJSFunction::IsWasmJSFunction(Object object) {
return js_function.shared().HasWasmJSFunctionData();
}
-// TODO(7748): WasmJSFunctions should compile/find and store an import wrapper
-// in case they are called from within wasm.
Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
const wasm::FunctionSig* sig,
Handle<JSReceiver> callable) {
@@ -1973,6 +1994,7 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
// signature instead of compiling a new one for every instantiation.
Handle<Code> wrapper_code =
compiler::CompileJSToJSWrapper(isolate, sig, nullptr).ToHandleChecked();
+
Handle<WasmJSFunctionData> function_data =
Handle<WasmJSFunctionData>::cast(isolate->factory()->NewStruct(
WASM_JS_FUNCTION_DATA_TYPE, AllocationType::kOld));
@@ -1981,6 +2003,30 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
function_data->set_serialized_signature(*serialized_sig);
function_data->set_callable(*callable);
function_data->set_wrapper_code(*wrapper_code);
+ // Use Abort() as a default value (it will never be called if not overwritten
+ // below).
+ function_data->set_wasm_to_js_wrapper_code(
+ isolate->heap()->builtin(Builtins::kAbort));
+
+ if (wasm::WasmFeatures::FromIsolate(isolate).has_typed_funcref()) {
+ using CK = compiler::WasmImportCallKind;
+ int expected_arity = parameter_count;
+ CK kind = compiler::kDefaultImportCallKind;
+ if (callable->IsJSFunction()) {
+ SharedFunctionInfo shared = Handle<JSFunction>::cast(callable)->shared();
+ expected_arity = shared.internal_formal_parameter_count();
+ if (expected_arity != parameter_count) {
+ kind = CK::kJSFunctionArityMismatch;
+ }
+ }
+ // TODO(wasm): Think about caching and sharing the wasm-to-JS wrappers per
+ // signature instead of compiling a new one for every instantiation.
+ Handle<Code> wasm_to_js_wrapper_code =
+ compiler::CompileWasmToJSWrapper(isolate, sig, kind, expected_arity)
+ .ToHandleChecked();
+ function_data->set_wasm_to_js_wrapper_code(*wasm_to_js_wrapper_code);
+ }
+
Handle<String> name = isolate->factory()->Function_string();
if (callable->IsJSFunction()) {
name = JSFunction::GetName(Handle<JSFunction>::cast(callable));
@@ -2012,6 +2058,7 @@ const wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
return zone->New<wasm::FunctionSig>(return_count, parameter_count, types);
}
+// TODO(9495): Update this if function type variance is introduced.
bool WasmJSFunction::MatchesSignature(const wasm::FunctionSig* sig) {
DCHECK_LE(sig->all().size(), kMaxInt);
int sig_size = static_cast<int>(sig->all().size());
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index d269c8df4f..dcef1aec8b 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -16,7 +16,6 @@
#include "src/objects/objects.h"
#include "src/wasm/struct-types.h"
#include "src/wasm/value-type.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
@@ -44,13 +43,14 @@ class WasmExternalFunction;
class WasmInstanceObject;
class WasmJSFunction;
class WasmModuleObject;
-class WasmIndirectFunctionTable;
enum class SharedFlag : uint8_t;
template <class CppType>
class Managed;
+#include "torque-generated/src/wasm/wasm-objects-tq.inc"
+
#define DECL_OPTIONAL_ACCESSORS(name, type) \
DECL_GETTER(has_##name, bool) \
DECL_ACCESSORS(name, type)
@@ -666,6 +666,9 @@ class WasmExportedFunction : public JSFunction {
V8_EXPORT_PRIVATE const wasm::FunctionSig* sig();
+ bool MatchesSignature(const wasm::WasmModule* other_module,
+ const wasm::FunctionSig* other_sig);
+
DECL_CAST(WasmExportedFunction)
OBJECT_CONSTRUCTORS(WasmExportedFunction, JSFunction);
};
@@ -750,27 +753,6 @@ class WasmIndirectFunctionTable : public Struct {
OBJECT_CONSTRUCTORS(WasmIndirectFunctionTable, Struct);
};
-class WasmCapiFunctionData : public Struct {
- public:
- DECL_PRIMITIVE_ACCESSORS(call_target, Address)
- DECL_ACCESSORS(embedder_data, Foreign)
- DECL_ACCESSORS(wrapper_code, Code)
- DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
-
- DECL_CAST(WasmCapiFunctionData)
-
- DECL_PRINTER(WasmCapiFunctionData)
- DECL_VERIFIER(WasmCapiFunctionData)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_WASM_CAPI_FUNCTION_DATA_FIELDS)
-
- STATIC_ASSERT(kStartOfStrongFieldsOffset == kEmbedderDataOffset);
- using BodyDescriptor = FlexibleBodyDescriptor<kStartOfStrongFieldsOffset>;
-
- OBJECT_CONSTRUCTORS(WasmCapiFunctionData, Struct);
-};
-
// Information for a WasmExportedFunction which is referenced as the function
// data of the SharedFunctionInfo underlying the function. For details please
// see the {SharedFunctionInfo::HasWasmExportedFunctionData} predicate.
@@ -780,10 +762,11 @@ class WasmExportedFunctionData : public Struct {
DECL_ACCESSORS(instance, WasmInstanceObject)
DECL_INT_ACCESSORS(jump_table_offset)
DECL_INT_ACCESSORS(function_index)
+ DECL_ACCESSORS(signature, Foreign)
+ DECL_INT_ACCESSORS(call_count)
DECL_ACCESSORS(c_wrapper_code, Object)
DECL_ACCESSORS(wasm_call_target, Object)
DECL_INT_ACCESSORS(packed_args_size)
- DECL_ACCESSORS(signature, Foreign)
DECL_CAST(WasmExportedFunctionData)
@@ -809,6 +792,7 @@ class WasmJSFunctionData : public Struct {
DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
DECL_ACCESSORS(callable, JSReceiver)
DECL_ACCESSORS(wrapper_code, Code)
+ DECL_ACCESSORS(wasm_to_js_wrapper_code, Code)
DECL_CAST(WasmJSFunctionData)
diff --git a/deps/v8/src/wasm/wasm-objects.tq b/deps/v8/src/wasm/wasm-objects.tq
index bd1fdfd783..fc5cfd6985 100644
--- a/deps/v8/src/wasm/wasm-objects.tq
+++ b/deps/v8/src/wasm/wasm-objects.tq
@@ -2,10 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+@useParentTypeChecker
type PodArrayOfWasmValueType extends ByteArray
-constexpr 'PodArray<wasm::ValueType>';
+ constexpr 'PodArray<wasm::ValueType>';
+@useParentTypeChecker
type ManagedWasmNativeModule extends Foreign
-constexpr 'Managed<wasm::NativeModule>';
+ constexpr 'Managed<wasm::NativeModule>';
type WasmValueType extends uint8 constexpr 'wasm::ValueType::Kind';
extern class WasmInstanceObject extends JSObject;
@@ -15,23 +17,26 @@ extern class WasmExportedFunctionData extends Struct {
instance: WasmInstanceObject;
jump_table_offset: Smi;
function_index: Smi;
+ signature: Foreign;
+ call_count: Smi;
// The remaining fields are for fast calling from C++. The contract is
// that they are lazily populated, and either all will be present or none.
c_wrapper_code: Object;
wasm_call_target: Smi|Foreign;
packed_args_size: Smi;
- signature: Foreign;
}
extern class WasmJSFunctionData extends Struct {
callable: JSReceiver;
wrapper_code: Code;
+ wasm_to_js_wrapper_code: Code;
serialized_return_count: Smi;
serialized_parameter_count: Smi;
serialized_signature: PodArrayOfWasmValueType;
}
-extern class WasmCapiFunctionData extends Struct {
+@export
+class WasmCapiFunctionData extends HeapObject {
call_target: RawPtr;
embedder_data: Foreign; // Managed<wasm::FuncData>
wrapper_code: Code;
diff --git a/deps/v8/src/wasm/wasm-opcodes-inl.h b/deps/v8/src/wasm/wasm-opcodes-inl.h
index e050d12947..5e0f172bd5 100644
--- a/deps/v8/src/wasm/wasm-opcodes-inl.h
+++ b/deps/v8/src/wasm/wasm-opcodes-inl.h
@@ -35,7 +35,6 @@ namespace wasm {
#define CASE_S64x2_OP(name, str) CASE_OP(S64x2##name, "s64x2." str)
#define CASE_S32x4_OP(name, str) CASE_OP(S32x4##name, "s32x4." str)
#define CASE_S16x8_OP(name, str) CASE_OP(S16x8##name, "s16x8." str)
-#define CASE_V64x2_OP(name, str) CASE_OP(V64x2##name, "v64x2." str)
#define CASE_V32x4_OP(name, str) CASE_OP(V32x4##name, "v32x4." str)
#define CASE_V16x8_OP(name, str) CASE_OP(V16x8##name, "v16x8." str)
#define CASE_V8x16_OP(name, str) CASE_OP(V8x16##name, "v8x16." str)
@@ -235,7 +234,8 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIMD_OP(Neg, "neg")
CASE_SIMDF_OP(Sqrt, "sqrt")
CASE_SIMD_OP(Eq, "eq")
- CASE_SIMD_OP(Ne, "ne")
+ CASE_SIMDF_OP(Ne, "ne")
+ CASE_SIMDI_OP(Ne, "ne")
CASE_SIMD_OP(Add, "add")
CASE_SIMD_OP(Sub, "sub")
CASE_SIMD_OP(Mul, "mul")
@@ -267,27 +267,23 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIGN_OP(I8x16, ExtractLane, "extract_lane")
CASE_SIMDI_OP(ReplaceLane, "replace_lane")
CASE_SIGN_OP(SIMDI, Min, "min")
- CASE_SIGN_OP(I64x2, Min, "min")
CASE_SIGN_OP(SIMDI, Max, "max")
- CASE_SIGN_OP(I64x2, Max, "max")
CASE_SIGN_OP(SIMDI, Lt, "lt")
- CASE_SIGN_OP(I64x2, Lt, "lt")
CASE_SIGN_OP(SIMDI, Le, "le")
- CASE_SIGN_OP(I64x2, Le, "le")
CASE_SIGN_OP(SIMDI, Gt, "gt")
- CASE_SIGN_OP(I64x2, Gt, "gt")
CASE_SIGN_OP(SIMDI, Ge, "ge")
- CASE_SIGN_OP(I64x2, Ge, "ge")
+ CASE_CONVERT_OP(Convert, I64x2, I32x4Low, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I64x2, I32x4High, "i32", "convert")
CASE_SIGN_OP(SIMDI, Shr, "shr")
CASE_SIGN_OP(I64x2, Shr, "shr")
CASE_SIMDI_OP(Shl, "shl")
CASE_I64x2_OP(Shl, "shl")
CASE_I32x4_OP(AddHoriz, "add_horizontal")
CASE_I16x8_OP(AddHoriz, "add_horizontal")
- CASE_SIGN_OP(I16x8, AddSaturate, "add_saturate")
- CASE_SIGN_OP(I8x16, AddSaturate, "add_saturate")
- CASE_SIGN_OP(I16x8, SubSaturate, "sub_saturate")
- CASE_SIGN_OP(I8x16, SubSaturate, "sub_saturate")
+ CASE_SIGN_OP(I16x8, AddSat, "add_sat")
+ CASE_SIGN_OP(I8x16, AddSat, "add_sat")
+ CASE_SIGN_OP(I16x8, SubSat, "sub_sat")
+ CASE_SIGN_OP(I8x16, SubSat, "sub_sat")
CASE_S128_OP(And, "and")
CASE_S128_OP(Or, "or")
CASE_S128_OP(Xor, "xor")
@@ -298,13 +294,11 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I8x16_OP(Shuffle, "shuffle")
CASE_SIMDV_OP(AnyTrue, "any_true")
CASE_SIMDV_OP(AllTrue, "all_true")
- CASE_V64x2_OP(AnyTrue, "any_true")
- CASE_V64x2_OP(AllTrue, "all_true")
CASE_SIMDF_OP(Qfma, "qfma")
CASE_SIMDF_OP(Qfms, "qfms")
- CASE_S128_OP(LoadMem32Zero, "load32_zero")
- CASE_S128_OP(LoadMem64Zero, "load64_zero")
+ CASE_S128_OP(Load32Zero, "load32_zero")
+ CASE_S128_OP(Load64Zero, "load64_zero")
CASE_S128_OP(Load8Splat, "load8_splat")
CASE_S128_OP(Load16Splat, "load16_splat")
CASE_S128_OP(Load32Splat, "load32_splat")
@@ -315,17 +309,28 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S128_OP(Load16x4U, "load16x4_u")
CASE_S128_OP(Load32x2S, "load32x2_s")
CASE_S128_OP(Load32x2U, "load32x2_u")
+ CASE_S128_OP(Load8Lane, "load8_lane")
+ CASE_S128_OP(Load16Lane, "load16_lane")
+ CASE_S128_OP(Load32Lane, "load32_lane")
+ CASE_S128_OP(Load64Lane, "load64_lane")
+ CASE_S128_OP(Store8Lane, "store8_lane")
+ CASE_S128_OP(Store16Lane, "store16_lane")
+ CASE_S128_OP(Store32Lane, "store32_lane")
+ CASE_S128_OP(Store64Lane, "store64_lane")
CASE_I8x16_OP(RoundingAverageU, "avgr_u")
CASE_I16x8_OP(RoundingAverageU, "avgr_u")
+ CASE_I16x8_OP(Q15MulRSatS, "q15mulr_sat_s")
CASE_I8x16_OP(Abs, "abs")
+ CASE_I8x16_OP(Popcnt, "popcnt")
CASE_I16x8_OP(Abs, "abs")
CASE_I32x4_OP(Abs, "abs")
CASE_I8x16_OP(BitMask, "bitmask")
CASE_I16x8_OP(BitMask, "bitmask")
CASE_I32x4_OP(BitMask, "bitmask")
+ CASE_I64x2_OP(BitMask, "bitmask")
CASE_F32x4_OP(Pmin, "pmin")
CASE_F32x4_OP(Pmax, "pmax")
@@ -343,6 +348,18 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I32x4_OP(DotI16x8S, "dot_i16x8_s")
+ CASE_SIGN_OP(I16x8, ExtMulLowI8x16, "extmul_low_i8x16")
+ CASE_SIGN_OP(I16x8, ExtMulHighI8x16, "extmul_high_i8x16")
+ CASE_SIGN_OP(I32x4, ExtMulLowI16x8, "extmul_low_i16x8")
+ CASE_SIGN_OP(I32x4, ExtMulHighI16x8, "extmul_high_i16x8")
+ CASE_SIGN_OP(I64x2, ExtMulLowI32x4, "extmul_low_i32x4")
+ CASE_SIGN_OP(I64x2, ExtMulHighI32x4, "extmul_high_i32x4")
+ CASE_SIMDI_OP(SignSelect, "signselect")
+ CASE_I64x2_OP(SignSelect, "signselect")
+
+ CASE_SIGN_OP(I32x4, ExtAddPairwiseI16x8, "extadd_pairwise_i16x8")
+    CASE_SIGN_OP(I16x8, ExtAddPairwiseI8x16, "extadd_pairwise_i8x16")
+
// Atomic operations.
CASE_OP(AtomicNotify, "atomic.notify")
CASE_INT_OP(AtomicWait, "atomic.wait")
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 04767f53a2..76812446a9 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -21,8 +21,9 @@ class WasmFeatures;
struct WasmModule;
std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
-bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
- const WasmFeatures&);
+bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
+ const WasmModule* module,
+ const WasmFeatures&);
// Control expressions and blocks.
#define FOREACH_CONTROL_OPCODE(V) \
@@ -287,7 +288,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(S128Load16Splat, 0xfd08, s_i) \
V(S128Load32Splat, 0xfd09, s_i) \
V(S128Load64Splat, 0xfd0a, s_i) \
- V(S128StoreMem, 0xfd0b, v_is)
+ V(S128StoreMem, 0xfd0b, v_is) \
+ V(S128Load32Zero, 0xfdfc, s_i) \
+ V(S128Load64Zero, 0xfdfd, s_i)
#define FOREACH_SIMD_CONST_OPCODE(V) V(S128Const, 0xfd0c, _)
@@ -360,11 +363,11 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(I8x16ShrS, 0xfd6c, s_si) \
V(I8x16ShrU, 0xfd6d, s_si) \
V(I8x16Add, 0xfd6e, s_ss) \
- V(I8x16AddSaturateS, 0xfd6f, s_ss) \
- V(I8x16AddSaturateU, 0xfd70, s_ss) \
+ V(I8x16AddSatS, 0xfd6f, s_ss) \
+ V(I8x16AddSatU, 0xfd70, s_ss) \
V(I8x16Sub, 0xfd71, s_ss) \
- V(I8x16SubSaturateS, 0xfd72, s_ss) \
- V(I8x16SubSaturateU, 0xfd73, s_ss) \
+ V(I8x16SubSatS, 0xfd72, s_ss) \
+ V(I8x16SubSatU, 0xfd73, s_ss) \
V(I8x16MinS, 0xfd76, s_ss) \
V(I8x16MinU, 0xfd77, s_ss) \
V(I8x16MaxS, 0xfd78, s_ss) \
@@ -385,11 +388,11 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(I16x8ShrS, 0xfd8c, s_si) \
V(I16x8ShrU, 0xfd8d, s_si) \
V(I16x8Add, 0xfd8e, s_ss) \
- V(I16x8AddSaturateS, 0xfd8f, s_ss) \
- V(I16x8AddSaturateU, 0xfd90, s_ss) \
+ V(I16x8AddSatS, 0xfd8f, s_ss) \
+ V(I16x8AddSatU, 0xfd90, s_ss) \
V(I16x8Sub, 0xfd91, s_ss) \
- V(I16x8SubSaturateS, 0xfd92, s_ss) \
- V(I16x8SubSaturateU, 0xfd93, s_ss) \
+ V(I16x8SubSatS, 0xfd92, s_ss) \
+ V(I16x8SubSatU, 0xfd93, s_ss) \
V(I16x8Mul, 0xfd95, s_ss) \
V(I16x8MinS, 0xfd96, s_ss) \
V(I16x8MinU, 0xfd97, s_ss) \
@@ -415,6 +418,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(I32x4MinU, 0xfdb7, s_ss) \
V(I32x4MaxS, 0xfdb8, s_ss) \
V(I32x4MaxU, 0xfdb9, s_ss) \
+ V(I32x4DotI16x8S, 0xfdba, s_ss) \
V(I64x2Neg, 0xfdc1, s_s) \
V(I64x2Shl, 0xfdcb, s_si) \
V(I64x2ShrS, 0xfdcc, s_si) \
@@ -458,36 +462,53 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(F64x2NearestInt, 0xfddf, s_s)
#define FOREACH_SIMD_POST_MVP_MEM_OPCODE(V) \
- V(S128LoadMem32Zero, 0xfdfc, s_i) \
- V(S128LoadMem64Zero, 0xfdfd, s_i)
-
-#define FOREACH_SIMD_POST_MVP_OPCODE(V) \
- V(I8x16Mul, 0xfd75, s_ss) \
- V(V64x2AnyTrue, 0xfdc2, i_s) \
- V(V64x2AllTrue, 0xfdc3, i_s) \
- V(I64x2Eq, 0xfdc0, s_ss) \
- V(I64x2Ne, 0xfdc4, s_ss) \
- V(I64x2LtS, 0xfdc5, s_ss) \
- V(I64x2LtU, 0xfdc6, s_ss) \
- V(I64x2GtS, 0xfdc7, s_ss) \
- V(I64x2GtU, 0xfdc8, s_ss) \
- V(I64x2LeS, 0xfdc9, s_ss) \
- V(I64x2LeU, 0xfdca, s_ss) \
- V(I64x2GeS, 0xfdcf, s_ss) \
- V(I64x2GeU, 0xfdd0, s_ss) \
- V(I64x2MinS, 0xfdd6, s_ss) \
- V(I64x2MinU, 0xfdd7, s_ss) \
- V(I64x2MaxS, 0xfde2, s_ss) \
- V(I64x2MaxU, 0xfdee, s_ss) \
- V(F32x4Qfma, 0xfdb4, s_sss) \
- V(F32x4Qfms, 0xfdd4, s_sss) \
- V(F64x2Qfma, 0xfdfe, s_sss) \
- V(F64x2Qfms, 0xfdff, s_sss) \
- V(I16x8AddHoriz, 0xfdaf, s_ss) \
- V(I32x4AddHoriz, 0xfdb0, s_ss) \
- V(I32x4DotI16x8S, 0xfdba, s_ss) \
- V(F32x4AddHoriz, 0xfdb2, s_ss) \
- V(F32x4RecipApprox, 0xfdb3, s_s) \
+ V(S128Load8Lane, 0xfd58, s_is) \
+ V(S128Load16Lane, 0xfd59, s_is) \
+ V(S128Load32Lane, 0xfd5a, s_is) \
+ V(S128Load64Lane, 0xfd5b, s_is) \
+ V(S128Store8Lane, 0xfd5c, v_is) \
+ V(S128Store16Lane, 0xfd5d, v_is) \
+ V(S128Store32Lane, 0xfd5e, v_is) \
+ V(S128Store64Lane, 0xfd5f, v_is)
+
+#define FOREACH_SIMD_POST_MVP_OPCODE(V) \
+ V(I8x16Mul, 0xfd75, s_ss) \
+ V(I8x16Popcnt, 0xfd7c, s_s) \
+ V(I8x16SignSelect, 0xfd7d, s_sss) \
+ V(I16x8SignSelect, 0xfd7e, s_sss) \
+ V(I32x4SignSelect, 0xfd7f, s_sss) \
+ V(I64x2SignSelect, 0xfd94, s_sss) \
+ V(I16x8Q15MulRSatS, 0xfd9c, s_ss) \
+ V(I16x8ExtMulLowI8x16S, 0xfd9a, s_ss) \
+ V(I16x8ExtMulHighI8x16S, 0xfd9d, s_ss) \
+ V(I16x8ExtMulLowI8x16U, 0xfd9e, s_ss) \
+ V(I16x8ExtMulHighI8x16U, 0xfd9f, s_ss) \
+ V(I32x4ExtMulLowI16x8S, 0xfdbb, s_ss) \
+ V(I32x4ExtMulHighI16x8S, 0xfdbd, s_ss) \
+ V(I32x4ExtMulLowI16x8U, 0xfdbe, s_ss) \
+ V(I32x4ExtMulHighI16x8U, 0xfdbf, s_ss) \
+ V(I64x2ExtMulLowI32x4S, 0xfdd2, s_ss) \
+ V(I64x2ExtMulHighI32x4S, 0xfdd3, s_ss) \
+ V(I64x2ExtMulLowI32x4U, 0xfdd6, s_ss) \
+ V(I64x2ExtMulHighI32x4U, 0xfdd7, s_ss) \
+ V(I32x4ExtAddPairwiseI16x8S, 0xfda5, s_s) \
+ V(I32x4ExtAddPairwiseI16x8U, 0xfda6, s_s) \
+ V(I16x8ExtAddPairwiseI8x16S, 0xfdc2, s_s) \
+ V(I16x8ExtAddPairwiseI8x16U, 0xfdc3, s_s) \
+ V(I64x2Eq, 0xfdc0, s_ss) \
+ V(F32x4Qfma, 0xfdb4, s_sss) \
+ V(I64x2BitMask, 0xfdc4, i_s) \
+ V(I64x2SConvertI32x4Low, 0xfdc7, s_s) \
+ V(I64x2SConvertI32x4High, 0xfdc8, s_s) \
+ V(I64x2UConvertI32x4Low, 0xfdc9, s_s) \
+ V(I64x2UConvertI32x4High, 0xfdca, s_s) \
+ V(F32x4Qfms, 0xfdd4, s_sss) \
+ V(F64x2Qfma, 0xfdfe, s_sss) \
+ V(F64x2Qfms, 0xfdff, s_sss) \
+ V(I16x8AddHoriz, 0xfdaf, s_ss) \
+ V(I32x4AddHoriz, 0xfdb0, s_ss) \
+ V(F32x4AddHoriz, 0xfdb2, s_ss) \
+ V(F32x4RecipApprox, 0xfdb3, s_s) \
V(F32x4RecipSqrtApprox, 0xfdbc, s_s)
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
@@ -700,17 +721,18 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(i_ci, kWasmI32, kWasmFuncRef, kWasmI32) \
V(i_qq, kWasmI32, kWasmEqRef, kWasmEqRef)
-#define FOREACH_SIMD_SIGNATURE(V) \
- V(s_s, kWasmS128, kWasmS128) \
- V(s_f, kWasmS128, kWasmF32) \
- V(s_d, kWasmS128, kWasmF64) \
- V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
- V(s_i, kWasmS128, kWasmI32) \
- V(s_l, kWasmS128, kWasmI64) \
- V(s_si, kWasmS128, kWasmS128, kWasmI32) \
- V(i_s, kWasmI32, kWasmS128) \
- V(v_is, kWasmStmt, kWasmI32, kWasmS128) \
- V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128)
+#define FOREACH_SIMD_SIGNATURE(V) \
+ V(s_s, kWasmS128, kWasmS128) \
+ V(s_f, kWasmS128, kWasmF32) \
+ V(s_d, kWasmS128, kWasmF64) \
+ V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
+ V(s_i, kWasmS128, kWasmI32) \
+ V(s_l, kWasmS128, kWasmI64) \
+ V(s_si, kWasmS128, kWasmS128, kWasmI32) \
+ V(i_s, kWasmI32, kWasmS128) \
+ V(v_is, kWasmStmt, kWasmI32, kWasmS128) \
+ V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128) \
+ V(s_is, kWasmS128, kWasmI32, kWasmS128)
#define FOREACH_PREFIX(V) \
V(Numeric, 0xfc) \
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 784dd0f615..8f0d5427aa 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -63,6 +63,8 @@ template <typename T>
class Result {
public:
Result() = default;
+ Result(const Result&) = delete;
+ Result& operator=(const Result&) = delete;
template <typename S>
explicit Result(S&& value) : value_(std::forward<S>(value)) {}
@@ -104,8 +106,6 @@ class Result {
T value_ = T{};
WasmError error_;
-
- DISALLOW_COPY_AND_ASSIGN(Result);
};
// A helper for generating error messages that bubble up to JS exceptions.
@@ -113,8 +113,10 @@ class V8_EXPORT_PRIVATE ErrorThrower {
public:
ErrorThrower(Isolate* isolate, const char* context)
: isolate_(isolate), context_(context) {}
- // Explicitly allow move-construction. Disallow copy (below).
+ // Explicitly allow move-construction. Disallow copy.
ErrorThrower(ErrorThrower&& other) V8_NOEXCEPT;
+ ErrorThrower(const ErrorThrower&) = delete;
+ ErrorThrower& operator=(const ErrorThrower&) = delete;
~ErrorThrower();
PRINTF_FORMAT(2, 3) void TypeError(const char* fmt, ...);
@@ -165,7 +167,6 @@ class V8_EXPORT_PRIVATE ErrorThrower {
// ErrorThrower should always be stack-allocated, since it constitutes a scope
// (things happen in the destructor).
DISALLOW_NEW_AND_DELETE()
- DISALLOW_COPY_AND_ASSIGN(ErrorThrower);
};
// Like an ErrorThrower, but turns all pending exceptions into scheduled
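The ErrorThrower changes above keep the existing contract (a stack-allocated scope object that is movable but never copied) while spelling out the deleted copy operations. A generic sketch of that scope-object shape, with invented names and no relation to V8's API, could look like this:

// Generic scope-object sketch: the destructor does the reporting, copies are
// deleted, and a move hands the pending error to the new scope.
#include <iostream>
#include <optional>
#include <string>
#include <utility>

class ScopedReporter {
 public:
  explicit ScopedReporter(std::string context) : context_(std::move(context)) {}
  ScopedReporter(ScopedReporter&& other) noexcept
      : context_(std::move(other.context_)), error_(std::move(other.error_)) {
    other.error_.reset();  // the moved-from scope must not report again
  }
  ScopedReporter(const ScopedReporter&) = delete;
  ScopedReporter& operator=(const ScopedReporter&) = delete;
  ~ScopedReporter() {
    if (error_) std::cout << context_ << ": " << *error_ << '\n';
  }

  void Error(std::string message) { error_ = std::move(message); }

 private:
  std::string context_;
  std::optional<std::string> error_;
};

int main() {
  ScopedReporter thrower("compile");
  thrower.Error("type mismatch");
  return 0;
}  // prints "compile: type mismatch" when the scope unwinds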
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index f4f5f99268..1c73fc5c41 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -208,6 +208,9 @@ constexpr size_t kCodeHeaderSize = sizeof(bool) + // whether code is present
// a tag from the Address of an external reference and vice versa.
class ExternalReferenceList {
public:
+ ExternalReferenceList(const ExternalReferenceList&) = delete;
+ ExternalReferenceList& operator=(const ExternalReferenceList&) = delete;
+
uint32_t tag_from_address(Address ext_ref_address) const {
auto tag_addr_less_than = [this](uint32_t tag, Address searched_addr) {
return external_reference_by_tag_[tag] < searched_addr;
@@ -263,7 +266,6 @@ class ExternalReferenceList {
#undef RUNTIME_ADDR
};
uint32_t tags_ordered_by_address_[kNumExternalReferences];
- DISALLOW_COPY_AND_ASSIGN(ExternalReferenceList);
};
static_assert(std::is_trivially_destructible<ExternalReferenceList>::value,
@@ -273,8 +275,9 @@ static_assert(std::is_trivially_destructible<ExternalReferenceList>::value,
class V8_EXPORT_PRIVATE NativeModuleSerializer {
public:
- NativeModuleSerializer() = delete;
NativeModuleSerializer(const NativeModule*, Vector<WasmCode* const>);
+ NativeModuleSerializer(const NativeModuleSerializer&) = delete;
+ NativeModuleSerializer& operator=(const NativeModuleSerializer&) = delete;
size_t Measure() const;
bool Write(Writer* writer);
@@ -287,8 +290,6 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
const NativeModule* const native_module_;
Vector<WasmCode* const> code_table_;
bool write_called_;
-
- DISALLOW_COPY_AND_ASSIGN(NativeModuleSerializer);
};
NativeModuleSerializer::NativeModuleSerializer(
@@ -468,8 +469,9 @@ bool WasmSerializer::SerializeNativeModule(Vector<byte> buffer) const {
class V8_EXPORT_PRIVATE NativeModuleDeserializer {
public:
- NativeModuleDeserializer() = delete;
explicit NativeModuleDeserializer(NativeModule*);
+ NativeModuleDeserializer(const NativeModuleDeserializer&) = delete;
+ NativeModuleDeserializer& operator=(const NativeModuleDeserializer&) = delete;
bool Read(Reader* reader);
@@ -479,8 +481,6 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
NativeModule* const native_module_;
bool read_called_;
-
- DISALLOW_COPY_AND_ASSIGN(NativeModuleDeserializer);
};
NativeModuleDeserializer::NativeModuleDeserializer(NativeModule* native_module)
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index 3926a4c7e9..81dbd3e9cb 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -46,10 +46,21 @@ class Simd128 {
const uint8_t* bytes() { return val_; }
+ template <typename T>
+ inline T to();
+
private:
uint8_t val_[16] = {0};
};
+#define DECLARE_CAST(cType, sType, name, size) \
+ template <> \
+ inline sType Simd128::to() { \
+ return to_##name(); \
+ }
+FOREACH_SIMD_TYPE(DECLARE_CAST)
+#undef DECLARE_CAST
+
// Macro for defining WasmValue methods for different types.
// Elements:
// - name (for to_<name>() method)
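The new Simd128::to<T>() relies on a declared-but-undefined member template whose only definitions are the macro-generated explicit specializations forwarding to the existing typed getters. A reduced sketch of that pattern follows (the Bytes8 class, its two views, and the FOREACH list are made up for illustration; they are not V8's FOREACH_SIMD_TYPE):

// Reduced sketch: member template + macro-generated explicit specializations.
#include <cstdint>
#include <cstring>
#include <iostream>

class Bytes8 {
 public:
  template <typename T>
  T to();  // only the specializations generated below exist

  uint32_t to_u32() const {
    uint32_t v;
    std::memcpy(&v, val_, sizeof(v));
    return v;
  }
  uint64_t to_u64() const {
    uint64_t v;
    std::memcpy(&v, val_, sizeof(v));
    return v;
  }

 private:
  uint8_t val_[8] = {1, 0, 0, 0, 0, 0, 0, 0};
};

#define FOREACH_VIEW_TYPE(V) \
  V(uint32_t, u32)           \
  V(uint64_t, u64)

#define DECLARE_CAST(cType, name) \
  template <>                     \
  inline cType Bytes8::to() {     \
    return to_##name();           \
  }
FOREACH_VIEW_TYPE(DECLARE_CAST)
#undef DECLARE_CAST

int main() {
  Bytes8 b;
  // Prints "1 1" on a little-endian machine.
  std::cout << b.to<uint32_t>() << ' ' << b.to<uint64_t>() << '\n';
  return 0;
}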
diff --git a/deps/v8/src/zone/zone-containers.h b/deps/v8/src/zone/zone-containers.h
index 638ddd2213..c2b4dfa703 100644
--- a/deps/v8/src/zone/zone-containers.h
+++ b/deps/v8/src/zone/zone-containers.h
@@ -176,9 +176,9 @@ class ZoneUnorderedSet
: public std::unordered_set<K, Hash, KeyEqual, ZoneAllocator<K>> {
public:
// Constructs an empty map.
- explicit ZoneUnorderedSet(Zone* zone)
+ explicit ZoneUnorderedSet(Zone* zone, size_t bucket_count = 100)
: std::unordered_set<K, Hash, KeyEqual, ZoneAllocator<K>>(
- 100, Hash(), KeyEqual(), ZoneAllocator<K>(zone)) {}
+ bucket_count, Hash(), KeyEqual(), ZoneAllocator<K>(zone)) {}
};
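The bucket_count parameter added above lets callers size the backing hash table up front instead of always starting at 100 buckets. The motivation is the usual one for unordered containers, sketched here directly with std::unordered_set (the numbers are illustrative):

// Pre-sizing an unordered container avoids repeated rehashing while it grows.
#include <iostream>
#include <unordered_set>

int main() {
  std::unordered_set<int> default_sized;    // starts with a small table
  std::unordered_set<int> pre_sized(4096);  // bucket count hint given up front

  for (int i = 0; i < 3000; ++i) {
    default_sized.insert(i);
    pre_sized.insert(i);
  }
  std::cout << "buckets without hint: " << default_sized.bucket_count() << '\n';
  std::cout << "buckets with hint:    " << pre_sized.bucket_count() << '\n';
  return 0;
}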
// A wrapper subclass for std::multimap to make it easy to construct one that